| author | David S. Miller <davem@nuts.ninka.net> | 2003-05-12 01:51:27 -0700 |
|---|---|---|
| committer | David S. Miller <davem@nuts.ninka.net> | 2003-05-12 01:51:27 -0700 |
| commit | f5bda5bc978626bf1f508370c25702366bc2c214 | |
| tree | fc9df7eb89c998942c67b76a2a805705b76b9513 /include | |
| parent | d62a3dd4742d6fad314e1f3e15ec9d6b63adacff | |
| parent | cb52a86c562adbf63abf8bde4d4502a444a64c01 | |
Merge nuts.ninka.net:/home/davem/src/BK/network-2.5
into nuts.ninka.net:/home/davem/src/BK/net-2.5
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-sparc/hardirq.h | 1 |
| -rw-r--r-- | include/asm-sparc64/hardirq.h | 1 |
| -rw-r--r-- | include/linux/brlock.h | 222 |
| -rw-r--r-- | include/linux/ipv6.h | 7 |
| -rw-r--r-- | include/linux/netdevice.h | 3 |
| -rw-r--r-- | include/linux/skbuff.h | 9 |
| -rw-r--r-- | include/net/ipv6.h | 20 |
| -rw-r--r-- | include/net/xfrm.h | 13 |
8 files changed, 51 insertions, 225 deletions
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index 822ec6f1062d..04644f2f2930 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -9,7 +9,6 @@
 #include <linux/config.h>
 #include <linux/threads.h>
-#include <linux/brlock.h>
 #include <linux/spinlock.h>
 #include <linux/cache.h>
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index 0d52c3c6c66a..9657368f4ad2 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -8,7 +8,6 @@
 #include <linux/config.h>
 #include <linux/threads.h>
-#include <linux/brlock.h>
 #include <linux/spinlock.h>
 #include <linux/cache.h>
diff --git a/include/linux/brlock.h b/include/linux/brlock.h
deleted file mode 100644
index 59880a3f38c6..000000000000
--- a/include/linux/brlock.h
+++ /dev/null
@@ -1,222 +0,0 @@
-#ifndef __LINUX_BRLOCK_H
-#define __LINUX_BRLOCK_H
-
-/*
- * 'Big Reader' read-write spinlocks.
- *
- * super-fast read/write locks, with write-side penalty. The point
- * is to have a per-CPU read/write lock. Readers lock their CPU-local
- * readlock, writers must lock all locks to get write access. These
- * CPU-read-write locks are semantically identical to normal rwlocks.
- * Memory usage is higher as well. (NR_CPUS*L1_CACHE_BYTES bytes)
- *
- * The most important feature is that these spinlocks do not cause
- * cacheline ping-pong in the 'most readonly data' case.
- *
- * Copyright 2000, Ingo Molnar <mingo@redhat.com>
- *
- * Registry idea and naming [ crutial! :-) ] by:
- *
- *        David S. Miller <davem@redhat.com>
- *
- * David has an implementation that doesn't use atomic operations in
- * the read branch via memory ordering tricks - i guess we need to
- * split this up into a per-arch thing? The atomicity issue is a
- * secondary item in profiles, at least on x86 platforms.
- *
- * The atomic op version overhead is indeed a big deal on
- * load-locked/store-conditional cpus (ALPHA/MIPS/PPC) and
- * compare-and-swap cpus (Sparc64). So we control which
- * implementation to use with a __BRLOCK_USE_ATOMICS define. -DaveM
- *
- */
-
-/* Register bigreader lock indices here. */
-enum brlock_indices {
-        BR_NETPROTO_LOCK,
-        __BR_END
-};
-
-#include <linux/config.h>
-
-#ifdef CONFIG_SMP
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-
-#if defined(__i386__) || defined(__ia64__) || defined(__x86_64__)
-#define __BRLOCK_USE_ATOMICS
-#else
-#undef __BRLOCK_USE_ATOMICS
-#endif
-
-#ifdef __BRLOCK_USE_ATOMICS
-typedef rwlock_t brlock_read_lock_t;
-#else
-typedef unsigned int brlock_read_lock_t;
-#endif
-
-/*
- * align last allocated index to the next cacheline:
- */
-#define __BR_IDX_MAX \
-        (((sizeof(brlock_read_lock_t)*__BR_END + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) / sizeof(brlock_read_lock_t))
-
-extern brlock_read_lock_t __brlock_array[NR_CPUS][__BR_IDX_MAX];
-
-#ifndef __BRLOCK_USE_ATOMICS
-struct br_wrlock {
-        spinlock_t lock;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
-
-extern struct br_wrlock __br_write_locks[__BR_IDX_MAX];
-#endif
-
-extern void __br_lock_usage_bug (void);
-
-#ifdef __BRLOCK_USE_ATOMICS
-
-static inline void br_read_lock (enum brlock_indices idx)
-{
-        /*
-         * This causes a link-time bug message if an
-         * invalid index is used:
-         */
-        if (idx >= __BR_END)
-                __br_lock_usage_bug();
-
-        preempt_disable();
-        _raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
-}
-
-static inline void br_read_unlock (enum brlock_indices idx)
-{
-        if (idx >= __BR_END)
-                __br_lock_usage_bug();
-
-        read_unlock(&__brlock_array[smp_processor_id()][idx]);
-}
-
-#else /* ! __BRLOCK_USE_ATOMICS */
-static inline void br_read_lock (enum brlock_indices idx)
-{
-        unsigned int *ctr;
-        spinlock_t *lock;
-
-        /*
-         * This causes a link-time bug message if an
-         * invalid index is used:
-         */
-        if (idx >= __BR_END)
-                __br_lock_usage_bug();
-
-        preempt_disable();
-        ctr = &__brlock_array[smp_processor_id()][idx];
-        lock = &__br_write_locks[idx].lock;
-again:
-        (*ctr)++;
-        mb();
-        if (spin_is_locked(lock)) {
-                (*ctr)--;
-                wmb(); /*
-                        * The release of the ctr must become visible
-                        * to the other cpus eventually thus wmb(),
-                        * we don't care if spin_is_locked is reordered
-                        * before the releasing of the ctr.
-                        * However IMHO this wmb() is superflous even in theory.
-                        * It would not be superflous only if on the
-                        * other CPUs doing a ldl_l instead of an ldl
-                        * would make a difference and I don't think this is
-                        * the case.
-                        * I'd like to clarify this issue further
-                        * but for now this is a slow path so adding the
-                        * wmb() will keep us on the safe side.
-                        */
-                while (spin_is_locked(lock))
-                        barrier();
-                goto again;
-        }
-}
-
-static inline void br_read_unlock (enum brlock_indices idx)
-{
-        unsigned int *ctr;
-
-        if (idx >= __BR_END)
-                __br_lock_usage_bug();
-
-        ctr = &__brlock_array[smp_processor_id()][idx];
-
-        wmb();
-        (*ctr)--;
-        preempt_enable();
-}
-#endif /* __BRLOCK_USE_ATOMICS */
-
-/* write path not inlined - it's rare and larger */
-
-extern void FASTCALL(__br_write_lock (enum brlock_indices idx));
-extern void FASTCALL(__br_write_unlock (enum brlock_indices idx));
-
-static inline void br_write_lock (enum brlock_indices idx)
-{
-        if (idx >= __BR_END)
-                __br_lock_usage_bug();
-        __br_write_lock(idx);
-}
-
-static inline void br_write_unlock (enum brlock_indices idx)
-{
-        if (idx >= __BR_END)
-                __br_lock_usage_bug();
-        __br_write_unlock(idx);
-}
-
-#else
-# define br_read_lock(idx)      ({ (void)(idx); preempt_disable(); })
-# define br_read_unlock(idx)    ({ (void)(idx); preempt_enable(); })
-# define br_write_lock(idx)     ({ (void)(idx); preempt_disable(); })
-# define br_write_unlock(idx)   ({ (void)(idx); preempt_enable(); })
-#endif /* CONFIG_SMP */
-
-/*
- * Now enumerate all of the possible sw/hw IRQ protected
- * versions of the interfaces.
- */
-#define br_read_lock_irqsave(idx, flags) \
-        do { local_irq_save(flags); br_read_lock(idx); } while (0)
-
-#define br_read_lock_irq(idx) \
-        do { local_irq_disable(); br_read_lock(idx); } while (0)
-
-#define br_read_lock_bh(idx) \
-        do { local_bh_disable(); br_read_lock(idx); } while (0)
-
-#define br_write_lock_irqsave(idx, flags) \
-        do { local_irq_save(flags); br_write_lock(idx); } while (0)
-
-#define br_write_lock_irq(idx) \
-        do { local_irq_disable(); br_write_lock(idx); } while (0)
-
-#define br_write_lock_bh(idx) \
-        do { local_bh_disable(); br_write_lock(idx); } while (0)
-
-#define br_read_unlock_irqrestore(idx, flags) \
-        do { br_read_unlock(irx); local_irq_restore(flags); } while (0)
-
-#define br_read_unlock_irq(idx) \
-        do { br_read_unlock(idx); local_irq_enable(); } while (0)
-
-#define br_read_unlock_bh(idx) \
-        do { br_read_unlock(idx); local_bh_enable(); } while (0)
-
-#define br_write_unlock_irqrestore(idx, flags) \
-        do { br_write_unlock(irx); local_irq_restore(flags); } while (0)
-
-#define br_write_unlock_irq(idx) \
-        do { br_write_unlock(idx); local_irq_enable(); } while (0)
-
-#define br_write_unlock_bh(idx) \
-        do { br_write_unlock(idx); local_bh_enable(); } while (0)
-
-#endif /* __LINUX_BRLOCK_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index caab8bbc88fe..f70e91028c96 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -121,6 +121,7 @@ struct ipv6hdr {
 #include <linux/icmpv6.h>
 #include <net/if_inet6.h>       /* struct ipv6_mc_socklist */
 #include <linux/tcp.h>
+#include <linux/udp.h>
 
 /* This structure contains results of exthdrs parsing
@@ -178,6 +179,11 @@ struct ipv6_pinfo {
         struct ipv6_txoptions   *opt;
         struct sk_buff          *pktoptions;
+        struct {
+                struct ipv6_txoptions *opt;
+                struct rt6_info *rt;
+                struct flowi *fl;
+        } cork;
 };
 
 struct raw6_opt {
@@ -200,6 +206,7 @@ struct udp6_sock {
         struct sock       sk;
         struct ipv6_pinfo *pinet6;
         struct inet_opt   inet;
+        struct udp_opt    udp;
         struct ipv6_pinfo inet6;
 };
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 080084cc3afc..9ebb267411df 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -456,7 +456,7 @@ struct packet_type
         int                     (*func) (struct sk_buff *, struct net_device *,
                                          struct packet_type *);
         void                    *data;  /* Private to the packet type */
-        struct packet_type      *next;
+        struct list_head        list;
 };
@@ -472,6 +472,7 @@ extern int              netdev_boot_setup_check(struct net_device *dev);
 extern struct net_device        *dev_getbyhwaddr(unsigned short type, char *hwaddr);
 extern void             dev_add_pack(struct packet_type *pt);
 extern void             dev_remove_pack(struct packet_type *pt);
+extern void             __dev_remove_pack(struct packet_type *pt);
 extern int              dev_get(const char *name);
 extern struct net_device        *dev_get_by_flags(unsigned short flags,
                                                   unsigned short mask);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3be29f0d15f6..6b8ab6887236 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -792,6 +792,15 @@ static inline int skb_pagelen(const struct sk_buff *skb)
         return len + skb_headlen(skb);
 }
 
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
+{
+        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+        frag->page = page;
+        frag->page_offset = off;
+        frag->size = size;
+        skb_shinfo(skb)->nr_frags = i+1;
+}
+
 #define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) \
                                   BUG(); } while (0)
 #define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) \
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index b6760b3b9f56..754c7f36afda 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -316,6 +316,26 @@ extern int                      ip6_build_xmit(struct sock *sk,
                                                struct ipv6_txoptions *opt,
                                                int hlimit, int flags);
 
+extern int                      ip6_append_data(struct sock *sk,
+                                                int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
+                                                void *from,
+                                                int length,
+                                                int transhdrlen,
+                                                int hlimit,
+                                                struct ipv6_txoptions *opt,
+                                                struct flowi *fl,
+                                                struct rt6_info *rt,
+                                                unsigned int flags);
+
+extern int                      ip6_push_pending_frames(struct sock *sk);
+
+extern void                     ip6_flush_pending_frames(struct sock *sk);
+
+extern int                      ip6_dst_lookup(struct sock *sk,
+                                               struct dst_entry **dst,
+                                               struct flowi *fl,
+                                               struct in6_addr **saddr);
+
 /*
  *      skb processing functions
  */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 099781cf0725..f11387b308af 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -123,6 +123,12 @@ struct xfrm_state
         /* Data for encapsulator */
         struct xfrm_encap_tmpl  *encap;
 
+        /* IPComp needs an IPIP tunnel for handling uncompressed packets */
+        struct xfrm_state       *tunnel;
+
+        /* If a tunnel, number of users + 1 */
+        atomic_t                tunnel_users;
+
         /* State for replay detection */
         struct xfrm_replay_state replay;
 
@@ -196,6 +202,8 @@ extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
 extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
 extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
+extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
+
 struct xfrm_decap_state;
 struct xfrm_type
 {
@@ -699,6 +707,11 @@ xfrm_state_addr_check(struct xfrm_state *x,
         return 0;
 }
 
+static inline int xfrm_state_kern(struct xfrm_state *x)
+{
+        return atomic_read(&x->tunnel_users);
+}
+
 /*
  * xfrm algorithm information
  */
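For context on what is being removed: the brlock.h header deleted above defines "big reader" locks whose read side touches only the current CPU's lock, while the write side must take every CPU's lock. A minimal, hypothetical caller sketch follows; the two functions are illustrative only (not from this commit) and assume the pre-removal <linux/brlock.h> interface shown in the diff.

```c
/*
 * Hypothetical sketch, not from this commit: typical use of the
 * now-removed <linux/brlock.h> interface.  Readers take only their
 * CPU-local lock on the hot path; the rare writer locks all CPUs.
 */
#include <linux/brlock.h>

static void example_reader(void)
{
	br_read_lock(BR_NETPROTO_LOCK);		/* cheap, CPU-local */
	/* ... walk read-mostly protocol data ... */
	br_read_unlock(BR_NETPROTO_LOCK);
}

static void example_writer(void)
{
	br_write_lock_bh(BR_NETPROTO_LOCK);	/* expensive: locks every CPU */
	/* ... update the registry ... */
	br_write_unlock_bh(BR_NETPROTO_LOCK);
}
```

This is the trade-off the commit appears to retire, presumably in favour of the list_head-based packet_type registry and the new __dev_remove_pack() visible in the netdevice.h hunk.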

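The net/ipv6.h hunk adds an append/push/flush trio that pairs with the new cork member in ipv6_pinfo. Below is a hedged sketch of how a datagram send path might drive it, using only the signatures declared in the diff; the wrapper function, its argument names, and the error handling are illustrative assumptions, not code from this commit.

```c
/*
 * Hypothetical sketch: corked IPv6 transmit using the interfaces
 * declared in the net/ipv6.h hunk above.  Names and error handling
 * are illustrative; only the ip6_* signatures come from the diff.
 */
#include <net/ipv6.h>
#include <linux/udp.h>

static int example_send(struct sock *sk,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length, int hlimit,
			struct ipv6_txoptions *opt, struct flowi *fl,
			struct rt6_info *rt, unsigned int flags)
{
	int err;

	/* Queue the payload against the socket's cork state. */
	err = ip6_append_data(sk, getfrag, from, length,
			      sizeof(struct udphdr),	/* transhdrlen */
			      hlimit, opt, fl, rt, flags);
	if (err)
		ip6_flush_pending_frames(sk);	/* drop anything queued */
	else
		err = ip6_push_pending_frames(sk);	/* build and send */

	return err;
}
```

Splitting append from push lets a caller queue several chunks before emitting a single packet train, which seems to be the point of the cork state added to ipv6_pinfo.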