| field | value | date |
|---|---|---|
| author | Anton Altaparmakov <aia21@cantab.net> | 2004-06-26 21:53:53 +0100 |
| committer | Anton Altaparmakov <aia21@cantab.net> | 2004-06-26 21:53:53 +0100 |
| commit | 320ed1994ecf7ccadaaa95196467565b34d8d686 | |
| tree | b36f16a87596469091c97d35002faa1b5c0360ab /include/net | |
| parent | 702fdfcae9a47ec4976d82d3d0b4b4a41bd72a52 | |
| parent | f6a7507c1714f5cb4faaebc76a1d02260830be01 | |
Merge cantab.net:/home/src/bklinux-2.6
into cantab.net:/home/src/ntfs-2.6
Diffstat (limited to 'include/net')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/net/dst.h | 6 |
| -rw-r--r-- | include/net/protocol.h | 11 |
| -rw-r--r-- | include/net/route.h | 2 |
| -rw-r--r-- | include/net/sock.h | 96 |
| -rw-r--r-- | include/net/tcp.h | 65 |
5 files changed, 107 insertions, 73 deletions
```diff
diff --git a/include/net/dst.h b/include/net/dst.h
index ed2504c6b4e9..543ff945bb2f 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -183,6 +183,12 @@ static inline void dst_free(struct dst_entry * dst)
 	__dst_free(dst);
 }
 
+static inline void dst_rcu_free(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+	dst_free(dst);
+}
+
 static inline void dst_confirm(struct dst_entry *dst)
 {
 	if (dst)
diff --git a/include/net/protocol.h b/include/net/protocol.h
index f67327486a9c..357691f6a45f 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -34,8 +34,7 @@
 
 /* This is used to register protocols. */
-struct inet_protocol
-{
+struct net_protocol {
 	int			(*handler)(struct sk_buff *skb);
 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 	int			no_policy;
@@ -78,15 +77,15 @@ struct inet_protosw {
 #define INET_PROTOSW_REUSE 0x01      /* Are ports automatically reusable? */
 #define INET_PROTOSW_PERMANENT 0x02  /* Permanent protocols are unremovable. */
 
-extern struct inet_protocol *inet_protocol_base;
-extern struct inet_protocol *inet_protos[MAX_INET_PROTOS];
+extern struct net_protocol *inet_protocol_base;
+extern struct net_protocol *inet_protos[MAX_INET_PROTOS];
 
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
 extern struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
 #endif
 
-extern int	inet_add_protocol(struct inet_protocol *prot, unsigned char num);
-extern int	inet_del_protocol(struct inet_protocol *prot, unsigned char num);
+extern int	inet_add_protocol(struct net_protocol *prot, unsigned char num);
+extern int	inet_del_protocol(struct net_protocol *prot, unsigned char num);
 extern void	inet_register_protosw(struct inet_protosw *p);
 extern void	inet_unregister_protosw(struct inet_protosw *p);
 
diff --git a/include/net/route.h b/include/net/route.h
index 0e7210e6d194..a5e9c575ea3e 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -129,7 +129,7 @@ extern void		ip_rt_send_redirect(struct sk_buff *skb);
 extern unsigned		inet_addr_type(u32 addr);
 extern void		ip_rt_multicast_event(struct in_device *);
-extern int		ip_rt_ioctl(unsigned int cmd, void *arg);
+extern int		ip_rt_ioctl(unsigned int cmd, void __user *arg);
 extern void		ip_rt_get_source(u8 *src, struct rtable *rt);
 extern int		ip_rt_dump(struct sk_buff *skb,
				    struct netlink_callback *cb);
```
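The protocol.h part of this hunk is a pure rename of struct inet_protocol to struct net_protocol; the fields and the inet_add_protocol()/inet_del_protocol() registration calls keep their shape. (The new dst_rcu_free() wrapper exists so a dst_entry can be handed to the RCU layer and only released via dst_free() after a grace period, and ip_rt_ioctl() merely gains the __user annotation on its argument.) A minimal sketch of a handler registered through the renamed structure; the dummy_* names and protocol number 253 are placeholders, not part of this diff:

```c
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

/* Placeholder receive handler: consume and drop the packet. */
static int dummy_rcv(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

/* Registered via the renamed struct net_protocol. */
static struct net_protocol dummy_protocol = {
	.handler	= dummy_rcv,
	.no_policy	= 1,
};

static int __init dummy_proto_init(void)
{
	/* inet_add_protocol() fails if the inet_protos[] slot is taken. */
	return inet_add_protocol(&dummy_protocol, 253);
}

module_init(dummy_proto_init);
MODULE_LICENSE("GPL");
```

Teardown would go through the matching inet_del_protocol() call declared in the same header.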
```diff
diff --git a/include/net/sock.h b/include/net/sock.h
index 0398823e18ed..9da91b9be903 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -167,6 +167,8 @@ struct sock_common {
   * @sk_socket - Identd and reporting IO signals
   * @sk_user_data - RPC layer private data
   * @sk_owner - module that owns this socket
+  * @sk_sndmsg_page - cached page for sendmsg
+  * @sk_sndmsg_off - cached offset for sendmsg
   * @sk_send_head - front of stuff to transmit
   * @sk_write_pending - a write to stream socket waits to start
   * @sk_queue_shrunk - write queue has been shrunk recently
@@ -249,8 +251,10 @@ struct sock {
 	struct timeval		sk_stamp;
 	struct socket		*sk_socket;
 	void			*sk_user_data;
-	struct sk_buff		*sk_send_head;
 	struct module		*sk_owner;
+	struct page		*sk_sndmsg_page;
+	__u32			sk_sndmsg_off;
+	struct sk_buff		*sk_send_head;
 	int			sk_write_pending;
 	void			*sk_security;
 	__u8			sk_queue_shrunk;
@@ -487,10 +491,11 @@ extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
 extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
 extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
 extern int sk_stream_error(struct sock *sk, int flags, int err);
+extern void sk_stream_kill_queues(struct sock *sk);
 
 extern int sk_wait_data(struct sock *sk, long *timeo);
 
-/* IP protocol blocks we attach to sockets.
+/* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
  */
@@ -534,6 +539,22 @@ struct proto {
 	void			(*unhash)(struct sock *sk);
 	int			(*get_port)(struct sock *sk, unsigned short snum);
 
+	/* Memory pressure */
+	void			(*enter_memory_pressure)(void);
+	atomic_t		*memory_allocated;	/* Current allocated memory. */
+	atomic_t		*sockets_allocated;	/* Current number of sockets. */
+	/*
+	 * Pressure flag: try to collapse.
+	 * Technical note: it is used by multiple contexts non atomically.
+	 * All the sk_stream_mem_schedule() is of this nature: accounting
+	 * is strict, actions are advisory and have some latency.
+	 */
+	int			*memory_pressure;
+	int			*sysctl_mem;
+	int			*sysctl_wmem;
+	int			*sysctl_rmem;
+	int			max_header;
+
 	char			name[32];
 
 	struct {
@@ -624,6 +645,37 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
 
+extern void __sk_stream_mem_reclaim(struct sock *sk);
+extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
+
+#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
+
+static inline int sk_stream_pages(int amt)
+{
+	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
+}
+
+static inline void sk_stream_mem_reclaim(struct sock *sk)
+{
+	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
+		__sk_stream_mem_reclaim(sk);
+}
+
+static inline void sk_stream_writequeue_purge(struct sock *sk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+		sk_stream_free_skb(sk, skb);
+	sk_stream_mem_reclaim(sk);
+}
+
+static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+{
+	return (int)skb->truesize <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, skb->truesize, 1);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
@@ -1105,6 +1157,46 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 	}
 }
 
+static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
+						   int size, int mem, int gfp)
+{
+	struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);
+
+	if (skb) {
+		skb->truesize += mem;
+		if (sk->sk_forward_alloc >= (int)skb->truesize ||
+		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+			skb_reserve(skb, sk->sk_prot->max_header);
+			return skb;
+		}
+		__kfree_skb(skb);
+	} else {
+		sk->sk_prot->enter_memory_pressure();
+		sk_stream_moderate_sndbuf(sk);
+	}
+	return NULL;
+}
+
+static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
+						  int size, int gfp)
+{
+	return sk_stream_alloc_pskb(sk, size, 0, gfp);
+}
+
+static inline struct page *sk_stream_alloc_page(struct sock *sk)
+{
+	struct page *page = NULL;
+
+	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
+	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+		page = alloc_pages(sk->sk_allocation, 0);
+	else {
+		sk->sk_prot->enter_memory_pressure();
+		sk_stream_moderate_sndbuf(sk);
+	}
+	return page;
+}
+
 #define sk_stream_for_retrans_queue(skb, sk)				\
 		for (skb = (sk)->sk_write_queue.next;			\
 		     (skb != (sk)->sk_send_head) &&			\
```
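The new struct proto hooks above generalise what used to be TCP-only memory accounting: the generic sk_stream_* helpers reach per-protocol state through sk->sk_prot instead of hard-coded TCP globals. A sketch of how a transport is expected to wire this up; the trimmed-down initializer and the tcp_* / sysctl_tcp_* symbol names are assumptions (the actual assignments live in the protocol's .c files, outside this include/net diff):

```c
#include <net/sock.h>
#include <net/tcp.h>

/* Assumed per-protocol state for the sketch; the hookup of TCP's own
 * counters and sysctls is not part of this header diff. */
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;

struct proto tcp_prot = {
	/* ... connect/sendmsg/hash/get_port and the other existing ops ... */
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.memory_allocated	= &tcp_memory_allocated,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,	/* reserved by sk_stream_alloc_pskb() */
	.name			= "TCP",
};
```

With pointers like these in place, the intent is that sk_stream_mem_schedule() and friends can charge pages against *memory_allocated, compare the total with the sysctl_mem thresholds, and raise *memory_pressure without knowing which stream protocol they are accounting for.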
```diff
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 22a62d58c24d..3f4c524de3e2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1867,70 +1867,7 @@ static __inline__ void tcp_openreq_init(struct open_request *req,
 	req->rmt_port = skb->h.th->source;
 }
 
-#define TCP_MEM_QUANTUM ((int)PAGE_SIZE)
-
-extern void __tcp_mem_reclaim(struct sock *sk);
-extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
-
-static inline void tcp_mem_reclaim(struct sock *sk)
-{
-	if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM)
-		__tcp_mem_reclaim(sk);
-}
-
-static inline void tcp_enter_memory_pressure(void)
-{
-	if (!tcp_memory_pressure) {
-		NET_INC_STATS(TCPMemoryPressures);
-		tcp_memory_pressure = 1;
-	}
-}
-
-static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
-{
-	struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);
-
-	if (skb) {
-		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    tcp_mem_schedule(sk, skb->truesize, 0)) {
-			skb_reserve(skb, MAX_TCP_HEADER);
-			return skb;
-		}
-		__kfree_skb(skb);
-	} else {
-		tcp_enter_memory_pressure();
-		sk_stream_moderate_sndbuf(sk);
-	}
-	return NULL;
-}
-
-static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
-{
-	return tcp_alloc_pskb(sk, size, 0, gfp);
-}
-
-static inline struct page * tcp_alloc_page(struct sock *sk)
-{
-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
-		struct page *page = alloc_pages(sk->sk_allocation, 0);
-		if (page)
-			return page;
-	}
-	tcp_enter_memory_pressure();
-	sk_stream_moderate_sndbuf(sk);
-	return NULL;
-}
-
-static inline void tcp_writequeue_purge(struct sock *sk)
-{
-	struct sk_buff *skb;
-
-	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
-		sk_stream_free_skb(sk, skb);
-	tcp_mem_reclaim(sk);
-}
+extern void tcp_enter_memory_pressure(void);
 
 extern void tcp_listen_wlock(void);
```
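The TCP-private helpers deleted here map one-for-one onto the generic versions added to sock.h: tcp_alloc_pskb/tcp_alloc_skb/tcp_alloc_page become sk_stream_alloc_pskb/sk_stream_alloc_skb/sk_stream_alloc_page, tcp_writequeue_purge becomes sk_stream_writequeue_purge, and tcp_mem_reclaim/tcp_mem_schedule become sk_stream_mem_reclaim/sk_stream_mem_schedule. Only tcp_enter_memory_pressure survives, now declared extern so its address can be stored in the new enter_memory_pressure hook. A sketch of that out-of-line definition, assuming it keeps the body of the deleted inline and moves into a TCP .c file such as net/ipv4/tcp.c (the placement is an assumption; only the header side is shown in this diff):

```c
#include <net/tcp.h>

/* Sketch: same body as the inline removed above, now out of line so
 * it can be referenced through sk->sk_prot->enter_memory_pressure. */
void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(TCPMemoryPressures);
		tcp_memory_pressure = 1;
	}
}
```

Callers inside TCP then go through the sk_stream_* variants, which pick up MAX_TCP_HEADER via sk->sk_prot->max_header, so the same accounting code can be shared by other stream protocols.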
