Diffstat (limited to 'include')
31 files changed, 391 insertions, 362 deletions
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index f75a25719f3c..9873354f2fd3 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -196,7 +196,7 @@ extern void aarp_device_down(struct net_device *dev);
 extern void aarp_cleanup_module(void);
 #endif /* MODULE */
 
-#define at_sk(__sk) ((struct atalk_sock *)(__sk)->protinfo)
+#define at_sk(__sk) ((struct atalk_sock *)(__sk)->sk_protinfo)
 
 extern struct sock *atalk_sockets;
 extern rwlock_t atalk_sockets_lock;
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 33a81bdd1fb8..16b70b711ffa 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -30,7 +30,7 @@
 #define ATM_DS3_PCR	(8000*12)	/* DS3: 12 cells in a 125 usec time slot */
 
-#define atm_sk(__sk) ((struct atm_vcc *)(__sk)->protinfo)
+#define atm_sk(__sk) ((struct atm_vcc *)(__sk)->sk_protinfo)
 #define ATM_SD(s)	(atm_sk((s)->sk))
@@ -413,19 +413,20 @@ static inline int atm_guess_pdu2truesize(int pdu_size)
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
-	atomic_add(truesize, &vcc->sk->rmem_alloc);
+	atomic_add(truesize, &vcc->sk->sk_rmem_alloc);
 }
 
 static inline void atm_return(struct atm_vcc *vcc,int truesize)
 {
-	atomic_sub(truesize, &vcc->sk->rmem_alloc);
+	atomic_sub(truesize, &vcc->sk->sk_rmem_alloc);
 }
 
 static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
 {
-	return (size + atomic_read(&vcc->sk->wmem_alloc)) < vcc->sk->sndbuf;
+	return (size + atomic_read(&vcc->sk->sk_wmem_alloc)) <
+	       vcc->sk->sk_sndbuf;
 }
diff --git a/include/linux/if_ec.h b/include/linux/if_ec.h
index b5ee8425325f..d654666f1067 100644
--- a/include/linux/if_ec.h
+++ b/include/linux/if_ec.h
@@ -56,7 +56,7 @@ struct econet_opt
 	unsigned short num;
 };
 
-#define ec_sk(__sk) ((struct econet_opt *)(__sk)->protinfo)
+#define ec_sk(__sk) ((struct econet_opt *)(__sk)->sk_protinfo)
 
 struct ec_device
 {
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 78ffd78ddb9e..581531924426 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -132,7 +132,7 @@ struct pppox_opt {
 #define pppoe_pa	proto.pppoe.pa
 #define pppoe_relay	proto.pppoe.relay
 
-#define pppox_sk(__sk) ((struct pppox_opt *)(__sk)->protinfo)
+#define pppox_sk(__sk) ((struct pppox_opt *)(__sk)->sk_protinfo)
 
 struct module;
diff --git a/include/linux/if_wanpipe.h b/include/linux/if_wanpipe.h
index 166e0e62c8e2..e594ca6069e5 100644
--- a/include/linux/if_wanpipe.h
+++ b/include/linux/if_wanpipe.h
@@ -34,7 +34,7 @@ struct wan_sockaddr_ll
 typedef struct {
 	unsigned char free;
-	unsigned char sk_state;
+	unsigned char state_sk;
 	int rcvbuf;
 	int sndbuf;
 	int rmem;
@@ -117,7 +117,7 @@ struct wanpipe_opt
 	unsigned short num;
 };
 
-#define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->protinfo)
+#define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->sk_protinfo)
 
 #endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 50fdde2d8a9f..0bc21b4e19eb 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -229,7 +229,7 @@ struct tcp6_sock {
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #define __ipv6_only_sock(sk)	(inet6_sk(sk)->ipv6only)
-#define ipv6_only_sock(sk)	((sk)->family == PF_INET6 && __ipv6_only_sock(sk))
+#define ipv6_only_sock(sk)	((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
 #else
 #define __ipv6_only_sock(sk)	0
 #define ipv6_only_sock(sk)	0
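All of the at_sk()/atm_sk()/ec_sk()-style accessors being renamed above follow one pattern: the protocol keeps its private state behind the socket's untyped sk_protinfo pointer and a per-protocol macro casts it back. A minimal standalone userspace model of that pattern (struct and macro names here are invented for illustration, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    /* The socket carries protocol-private state as an untyped pointer. */
    struct sock { void *sk_protinfo; };
    struct atalk_sock_model { int src_port; };   /* invented stand-in */

    #define at_sk_model(__sk) ((struct atalk_sock_model *)(__sk)->sk_protinfo)

    int main(void)
    {
        struct sock sk = { .sk_protinfo = malloc(sizeof(struct atalk_sock_model)) };

        at_sk_model(&sk)->src_port = 129;   /* typed access via the cast */
        printf("%d\n", at_sk_model(&sk)->src_port);
        free(sk.sk_protinfo);
        return 0;
    }
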
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f231a7ca0fa9..19b88f2c7c43 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -603,8 +603,8 @@ extern struct semaphore rtnl_sem;
 #define rtnl_shlock_nowait()	down_trylock(&rtnl_sem)
 
 #define rtnl_shunlock()	do { up(&rtnl_sem); \
-			     if (rtnl && rtnl->receive_queue.qlen) \
-				     rtnl->data_ready(rtnl, 0); \
+			     if (rtnl && rtnl->sk_receive_queue.qlen) \
+				     rtnl->sk_data_ready(rtnl, 0); \
			} while(0)
 
 extern void rtnl_lock(void);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index db82099b2b17..9bec0adddd5a 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -24,8 +24,8 @@ static inline unix_socket *first_unix_socket(int *i)
 static inline unix_socket *next_unix_socket(int *i, unix_socket *s)
 {
 	/* More in this chain? */
-	if (s->next)
-		return s->next;
+	if (s->sk_next)
+		return s->sk_next;
 	/* Look for next non-empty chain. */
 	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
 		if (unix_socket_table[*i])
diff --git a/include/net/ax25.h b/include/net/ax25.h
index a3344d406e24..661e1ea63c02 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -201,7 +201,7 @@ typedef struct ax25_cb {
 	struct sock		*sk;		/* Backlink to socket */
 } ax25_cb;
 
-#define ax25_sk(__sk) ((ax25_cb *)(__sk)->protinfo)
+#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
 
 /* af_ax25.c */
 extern ax25_cb *ax25_list;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 5bf98447775c..fd010a9dc75e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -485,7 +485,7 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
 
 /* HCI info for socket */
-#define hci_pi(sk)	((struct hci_pinfo *) sk->protinfo)
+#define hci_pi(sk)	((struct hci_pinfo *)sk->sk_protinfo)
 
 struct hci_pinfo {
 	struct hci_dev	  *hdev;
 	struct hci_filter filter;
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 3758bc57d7b8..d4e45740b109 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -206,7 +206,7 @@ struct l2cap_conn {
 };
 
 /* ----- L2CAP channel and socket info ----- */
-#define l2cap_pi(sk)	((struct l2cap_pinfo *) sk->protinfo)
+#define l2cap_pi(sk)	((struct l2cap_pinfo *)sk->sk_protinfo)
 
 struct l2cap_pinfo {
 	__u16		psm;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index c2a2098b4369..7d410fed2520 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -302,7 +302,7 @@ struct sockaddr_rc {
 	u8	rc_channel;
 };
 
-#define rfcomm_pi(sk)	((struct rfcomm_pinfo *) sk->protinfo)
+#define rfcomm_pi(sk)	((struct rfcomm_pinfo *)sk->sk_protinfo)
 
 struct rfcomm_pinfo {
 	struct rfcomm_dlc	*dlc;
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 968f56602da6..7f83037a0880 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -71,7 +71,7 @@ struct sco_conn {
 #define sco_conn_unlock(c)	spin_unlock(&c->lock);
 
 /* ----- SCO socket info ----- */
-#define sco_pi(sk)	((struct sco_pinfo *) sk->protinfo)
+#define sco_pi(sk)	((struct sco_pinfo *)sk->sk_protinfo)
 
 struct sco_pinfo {
 	__u32		flags;
diff --git a/include/net/dn.h b/include/net/dn.h
index c364cf8cecd7..da2239862a1f 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -133,7 +133,7 @@ struct dn_scp				/* Session Control Port */
 };
 
-#define DN_SK(__sk) ((struct dn_scp *)(__sk)->protinfo)
+#define DN_SK(__sk) ((struct dn_scp *)(__sk)->sk_protinfo)
 
 /*
  * src,dst : Source and Destination DECnet addresses
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 3826c0579000..a564fb8161a8 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -201,7 +201,7 @@ static __inline__ int sendack(unsigned short seq)
  */
 static __inline__ int dn_congested(struct sock *sk)
 {
-	return atomic_read(&sk->rmem_alloc) > (sk->rcvbuf >> 1);
+	return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
 }
 
 #define DN_MAX_NSP_DATA_HEADER (11)
diff --git a/include/net/ip.h b/include/net/ip.h
index f6a4e017f7bf..aa8dd8855dfa 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -238,7 +238,7 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
 {
 	inet_sk(sk)->rcv_saddr = inet_sk(sk)->saddr = 0;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-	if (sk->family == PF_INET6) {
+	if (sk->sk_family == PF_INET6) {
 		struct ipv6_pinfo *np = inet6_sk(sk);
 
 		memset(&np->saddr, 0, sizeof(np->saddr));
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d9f7ad0b9f71..752b31601ee4 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -107,11 +107,11 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct rt6_info *rt = (struct rt6_info *) dst;
 
-	write_lock(&sk->dst_lock);
+	write_lock(&sk->sk_dst_lock);
 	__sk_dst_set(sk, dst);
 	np->daddr_cache = daddr;
 	np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
-	write_unlock(&sk->dst_lock);
+	write_unlock(&sk->sk_dst_lock);
 }
 #endif
diff --git a/include/net/ipx.h b/include/net/ipx.h
index dd20531f04dd..6da0ea4d7cca 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -105,7 +105,7 @@ struct ipx_opt {
 	unsigned short		ipx_ncp_conn;
 };
 
-#define ipx_sk(__sk) ((struct ipx_opt *)(__sk)->protinfo)
+#define ipx_sk(__sk) ((struct ipx_opt *)(__sk)->sk_protinfo)
 #define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0]))
 #endif
 
 #define IPX_MIN_EPHEMERAL_SOCKET	0x4000
diff --git a/include/net/irda/af_irda.h b/include/net/irda/af_irda.h
index 6b278e1a5c69..0f6dafad4907 100644
--- a/include/net/irda/af_irda.h
+++ b/include/net/irda/af_irda.h
@@ -77,6 +77,6 @@ struct irda_sock {
 	LOCAL_FLOW rx_flow;
 };
 
-#define irda_sk(__sk) ((struct irda_sock *)(__sk)->protinfo)
+#define irda_sk(__sk) ((struct irda_sock *)(__sk)->sk_protinfo)
 
 #endif /* AF_IRDA_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 20fbfeec34c4..3deeb4235605 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -275,7 +275,7 @@ extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
 {
-	return atomic_read(&sk->rmem_alloc) + skb->truesize <
-	       (unsigned)sk->rcvbuf;
+	return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
+	       (unsigned)sk->sk_rcvbuf;
 }
 #endif /* LLC_C_EV_H */
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index acf62b4e3321..3cefd5ce2642 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -67,7 +67,7 @@ struct llc_opt {
 					   Used for resending FRMR */
 };
 
-#define llc_sk(__sk) ((struct llc_opt *)(__sk)->protinfo)
+#define llc_sk(__sk) ((struct llc_opt *)(__sk)->sk_protinfo)
 
 extern struct sock *llc_sk_alloc(int family, int priority);
 extern void llc_sk_free(struct sock *sk);
diff --git a/include/net/netrom.h b/include/net/netrom.h
index b83879018c6f..09a0c75d40b3 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -74,7 +74,7 @@ typedef struct {
 	struct sock		*sk;		/* Backlink to socket */
 } nr_cb;
 
-#define nr_sk(__sk) ((nr_cb *)(__sk)->protinfo)
+#define nr_sk(__sk) ((nr_cb *)(__sk)->sk_protinfo)
 
 struct nr_neigh {
	struct nr_neigh		*next;
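dn_congested() and llc_conn_space() above are two instances of the same receive-buffer watermark test against the renamed sk_rmem_alloc/sk_rcvbuf pair. A minimal standalone model (plain ints stand in for the kernel's atomic_t):

    #include <stdio.h>

    struct sock_model { int sk_rmem_alloc; int sk_rcvbuf; };

    /* "Congested" once committed receive memory crosses half of sk_rcvbuf. */
    static int congested(const struct sock_model *sk)
    {
        return sk->sk_rmem_alloc > (sk->sk_rcvbuf >> 1);
    }

    int main(void)
    {
        struct sock_model sk = { .sk_rmem_alloc = 70000, .sk_rcvbuf = 131072 };

        printf("congested: %d\n", congested(&sk));  /* 1, since 70000 > 65536 */
        return 0;
    }
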
diff --git a/include/net/rose.h b/include/net/rose.h
index fffbd5d5ee82..96d561e15a26 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -138,7 +138,7 @@ typedef struct {
 	struct sock		*sk;		/* Backlink to socket */
 } rose_cb;
 
-#define rose_sk(__sk) ((rose_cb *)(__sk)->protinfo)
+#define rose_sk(__sk) ((rose_cb *)(__sk)->sk_protinfo)
 
 /* af_rose.c */
 extern ax25_address rose_callsign;
diff --git a/include/net/route.h b/include/net/route.h
index acdfaaf6afb9..7fff45937e71 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -44,7 +44,7 @@
 /* RTO_CONN is not used (being alias for 0), but preserved not to break
  * some modules referring to it. */
-#define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sk->localroute)
+#define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sk->sk_localroute)
 
 struct inet_peer;
 struct rtable
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 501e41da70a3..ed453f38f12d 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -597,7 +597,7 @@ int static inline __sctp_state(const struct sctp_association *asoc,
 #define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
 int static inline __sctp_sstate(const struct sock *sk, sctp_sock_state_t state)
 {
-	return sk->state == state;
+	return sk->sk_state == state;
 }
 
 #endif /* __net_sctp_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index f9fb0af964c2..373278101ec1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -59,10 +59,11 @@
  * the other protocols.
  */
 
-/* Define this to get the sk->debug debugging facility. */
+/* Define this to get the sk->sk_debug debugging facility. */
 #define SOCK_DEBUGGING
 #ifdef SOCK_DEBUGGING
-#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
+#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->sk_debug)) \
+					printk(KERN_DEBUG msg); } while (0)
 #else
 #define SOCK_DEBUG(sk, msg...) do { } while (0)
 #endif
@@ -79,123 +80,147 @@ typedef struct {
 } socket_lock_t;
 
 #define sock_lock_init(__sk) \
-do {	spin_lock_init(&((__sk)->lock.slock)); \
-	(__sk)->lock.owner = NULL; \
-	init_waitqueue_head(&((__sk)->lock.wq)); \
+do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
+	(__sk)->sk_lock.owner = NULL; \
+	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
 } while(0)
 
+struct sock;
+
+/**
+ * struct sock_common - minimal network layer representation of sockets
+ * @skc_family - network address family
+ * @skc_state - Connection state
+ * @skc_reuse - %SO_REUSEADDR setting
+ * @skc_bound_dev_if - bound device index if != 0
+ * @skc_next - main hash linkage for various protocol lookup tables
+ * @skc_pprev - main hash linkage for various protocol lookup tables
+ * @skc_bind_next - main hash linkage for various protocol lookup tables
+ * @skc_bind_pprev - main hash linkage for various protocol lookup tables
+ * @skc_refcnt - reference count
+ *
+ * This is the minimal network layer representation of sockets, the header
+ * for struct sock and struct tcp_tw_bucket.
+ */
+struct sock_common {
+	unsigned short		skc_family;
+	volatile unsigned char	skc_state;
+	unsigned char		skc_reuse;
+	int			skc_bound_dev_if;
+	struct sock		*skc_next;
+	struct sock		**skc_pprev;
+	struct sock		*skc_bind_next;
+	struct sock		**skc_bind_pprev;
+	atomic_t		skc_refcnt;
+};
+
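The point of the new struct sock_common is that struct sock and struct tcp_tw_bucket (rewritten further down in net/tcp.h) now share their first member, so demux code can read the common fields through either type before deciding which one it actually holds. The layout trick, reduced to a standalone model (all names invented for illustration):

    #include <stdio.h>

    struct sock_common { unsigned short skc_family; unsigned char skc_state; };

    /* Both "full" types begin with the shared header. */
    struct sock_model { struct sock_common c; int sndbuf;  };
    struct tw_model   { struct sock_common c; int timeout; };

    /* Valid for either type: sock_common is the first member, so a pointer
     * to the whole object is also a pointer to its sock_common. */
    static unsigned short family_of(const void *entry)
    {
        return ((const struct sock_common *)entry)->skc_family;
    }

    int main(void)
    {
        struct sock_model sk = { .c = { .skc_family = 2 } };   /* AF_INET  */
        struct tw_model   tw = { .c = { .skc_family = 10 } };  /* AF_INET6 */

        printf("%d %d\n", family_of(&sk), family_of(&tw));
        return 0;
    }
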
 /**
  * struct sock - network layer representation of sockets
- * @state - Connection state
- * @zapped - ax25 & ipx means !linked
- * @reuse - %SO_REUSEADDR setting
- * @shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
- * @bound_dev_if - bound device index if != 0
- * @next - main hash linkage for various protocol lookup tables
- * @pprev - main hash linkage for various protocol lookup tables
- * @bind_next - main hash linkage for various protocol lookup tables
- * @bind_pprev - main hash linkage for various protocol lookup tables
- * @refcnt - reference count
- * @family - network address family
- * @use_write_queue - wheter to call sk->write_space(sk) in sock_wfree
- * @userlocks - %SO_SNDBUF and %SO_RCVBUF settings
- * @lock - synchronizer
- * @rcvbuf - size of receive buffer in bytes
- * @sleep - sock wait queue
- * @dst_cache - destination cache
- * @dst_lock - destination cache lock
- * @policy - flow policy
- * @rmem_alloc - receive queue bytes committed
- * @receive_queue - incoming packets
- * @wmem_alloc - transmit queue bytes committed
- * @write_queue - Packet sending queue
- * @omem_alloc - "o" is "option" or "other"
- * @wmem_queued - persistent queue size
- * @forward_alloc - space allocated forward
- * @allocation - allocation mode
- * @sndbuf - size of send buffer in bytes
- * @prev - pointer to previous sock in the list this sock is in
- * @flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
- * @no_check - %SO_NO_CHECK setting, wether or not checkup packets
- * @debug - %SO_DEBUG setting
- * @rcvtstamp - %SO_TIMESTAMP setting
- * @no_largesend - whether to sent large segments or not
- * @route_caps - route capabilities (e.g. %NETIF_F_TSO)
- * @lingertime - %SO_LINGER l_linger setting
- * @hashent - hash entry in several tables (e.g. tcp_ehash)
- * @pair - socket pair (e.g. AF_UNIX/unix_peer)
- * @backlog - always used with the per-socket spinlock held
- * @callback_lock - used with the callbacks in the end of this struct
- * @error_queue - rarely used
- * @prot - protocol handlers inside a network family
- * @err - last error
- * @err_soft - errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
- * @ack_backlog - current listen backlog
- * @max_ack_backlog - listen backlog set in listen()
- * @priority - %SO_PRIORITY setting
- * @type - socket type (%SOCK_STREAM, etc)
- * @localroute - route locally only, %SO_DONTROUTE setting
- * @protocol - which protocol this socket belongs in this network family
- * @peercred - %SO_PEERCRED setting
- * @rcvlowat - %SO_RCVLOWAT setting
- * @rcvtimeo - %SO_RCVTIMEO setting
- * @sndtimeo - %SO_SNDTIMEO setting
- * @filter - socket filtering instructions
- * @protinfo - private area, net family specific, when not using slab
- * @slab - the slabcache this instance was allocated from
- * @timer - sock cleanup timer
- * @stamp - time stamp of last packet received
- * @socket - Identd and reporting IO signals
- * @user_data - RPC layer private data
- * @owner - module that owns this socket
- * @state_change - callback to indicate change in the state of the sock
- * @data_ready - callback to indicate there is data to be processed
- * @write_space - callback to indicate there is bf sending space available
- * @error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
- * @backlog_rcv - callback to process the backlog
- * @destruct - called at sock freeing time, i.e. when all refcnt == 0
+ * @__sk_common - shared layout with tcp_tw_bucket
+ * @sk_zapped - ax25 & ipx means !linked
+ * @sk_shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
+ * @sk_use_write_queue - wheter to call sk->sk_write_space in sock_wfree
+ * @sk_userlocks - %SO_SNDBUF and %SO_RCVBUF settings
+ * @sk_lock - synchronizer
+ * @sk_rcvbuf - size of receive buffer in bytes
+ * @sk_sleep - sock wait queue
+ * @sk_dst_cache - destination cache
+ * @sk_dst_lock - destination cache lock
+ * @sk_policy - flow policy
+ * @sk_rmem_alloc - receive queue bytes committed
+ * @sk_receive_queue - incoming packets
+ * @sk_wmem_alloc - transmit queue bytes committed
+ * @sk_write_queue - Packet sending queue
+ * @sk_omem_alloc - "o" is "option" or "other"
+ * @sk_wmem_queued - persistent queue size
+ * @sk_forward_alloc - space allocated forward
+ * @sk_allocation - allocation mode
+ * @sk_sndbuf - size of send buffer in bytes
+ * @sk_prev - pointer to previous sock in the list this sock is in
+ * @sk_flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
+ * @sk_no_check - %SO_NO_CHECK setting, wether or not checkup packets
+ * @sk_debug - %SO_DEBUG setting
+ * @sk_rcvtstamp - %SO_TIMESTAMP setting
+ * @sk_no_largesend - whether to sent large segments or not
+ * @sk_route_caps - route capabilities (e.g. %NETIF_F_TSO)
+ * @sk_lingertime - %SO_LINGER l_linger setting
+ * @sk_hashent - hash entry in several tables (e.g. tcp_ehash)
+ * @sk_pair - socket pair (e.g. AF_UNIX/unix_peer)
+ * @sk_backlog - always used with the per-socket spinlock held
+ * @sk_callback_lock - used with the callbacks in the end of this struct
+ * @sk_error_queue - rarely used
+ * @sk_prot - protocol handlers inside a network family
+ * @sk_err - last error
+ * @sk_err_soft - errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
+ * @sk_ack_backlog - current listen backlog
+ * @sk_max_ack_backlog - listen backlog set in listen()
+ * @sk_priority - %SO_PRIORITY setting
+ * @sk_type - socket type (%SOCK_STREAM, etc)
+ * @sk_localroute - route locally only, %SO_DONTROUTE setting
+ * @sk_protocol - which protocol this socket belongs in this network family
+ * @sk_peercred - %SO_PEERCRED setting
+ * @sk_rcvlowat - %SO_RCVLOWAT setting
+ * @sk_rcvtimeo - %SO_RCVTIMEO setting
+ * @sk_sndtimeo - %SO_SNDTIMEO setting
+ * @sk_filter - socket filtering instructions
+ * @sk_protinfo - private area, net family specific, when not using slab
+ * @sk_slab - the slabcache this instance was allocated from
+ * @sk_timer - sock cleanup timer
+ * @sk_stamp - time stamp of last packet received
+ * @sk_socket - Identd and reporting IO signals
+ * @sk_user_data - RPC layer private data
+ * @sk_owner - module that owns this socket
+ * @sk_state_change - callback to indicate change in the state of the sock
+ * @sk_data_ready - callback to indicate there is data to be processed
+ * @sk_write_space - callback to indicate there is bf sending space available
+ * @sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
+ * @sk_backlog_rcv - callback to process the backlog
+ * @sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
  */
 struct sock {
-	/* Begin of struct sock/struct tcp_tw_bucket shared layout */
-	unsigned short		family;
-	volatile unsigned char	state;
-	unsigned char		reuse;
-	int			bound_dev_if;
-	struct sock		*next;
-	struct sock		**pprev;
-	struct sock		*bind_next;
-	struct sock		**bind_pprev;
-	atomic_t		refcnt;
-	/* End of struct sock/struct tcp_tw_bucket shared layout */
-	volatile unsigned char	zapped;
-	unsigned char		shutdown;
-	unsigned char		use_write_queue;
-	unsigned char		userlocks;
-	socket_lock_t		lock;
-	int			rcvbuf;
-	wait_queue_head_t	*sleep;
-	struct dst_entry	*dst_cache;
-	rwlock_t		dst_lock;
-	struct xfrm_policy	*policy[2];
-	atomic_t		rmem_alloc;
-	struct sk_buff_head	receive_queue;
-	atomic_t		wmem_alloc;
-	struct sk_buff_head	write_queue;
-	atomic_t		omem_alloc;
-	int			wmem_queued;
-	int			forward_alloc;
-	unsigned int		allocation;
-	int			sndbuf;
-	struct sock		*prev;
-	unsigned long		flags;
-	char			no_check;
-	unsigned char		debug;
-	unsigned char		rcvtstamp;
-	unsigned char		no_largesend;
-	int			route_caps;
-	unsigned long		lingertime;
-	int			hashent;
-	struct sock		*pair;
+	/*
+	 * Now struct tcp_tw_bucket also uses sock_common, so please just
+	 * don't add nothing before this first member (__sk_common) --acme
+	 */
+	struct sock_common	__sk_common;
+#define sk_family		__sk_common.skc_family
+#define sk_state		__sk_common.skc_state
+#define sk_reuse		__sk_common.skc_reuse
+#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
+#define sk_next			__sk_common.skc_next
+#define sk_pprev		__sk_common.skc_pprev
+#define sk_bind_next		__sk_common.skc_bind_next
+#define sk_bind_pprev		__sk_common.skc_bind_pprev
+#define sk_refcnt		__sk_common.skc_refcnt
+	volatile unsigned char	sk_zapped;
+	unsigned char		sk_shutdown;
+	unsigned char		sk_use_write_queue;
+	unsigned char		sk_userlocks;
+	socket_lock_t		sk_lock;
+	int			sk_rcvbuf;
+	wait_queue_head_t	*sk_sleep;
+	struct dst_entry	*sk_dst_cache;
+	rwlock_t		sk_dst_lock;
+	struct xfrm_policy	*sk_policy[2];
+	atomic_t		sk_rmem_alloc;
+	struct sk_buff_head	sk_receive_queue;
+	atomic_t		sk_wmem_alloc;
+	struct sk_buff_head	sk_write_queue;
+	atomic_t		sk_omem_alloc;
+	int			sk_wmem_queued;
+	int			sk_forward_alloc;
+	unsigned int		sk_allocation;
+	int			sk_sndbuf;
+	struct sock		*sk_prev;
+	unsigned long		sk_flags;
+	char			sk_no_check;
+	unsigned char		sk_debug;
+	unsigned char		sk_rcvtstamp;
+	unsigned char		sk_no_largesend;
+	int			sk_route_caps;
+	unsigned long		sk_lingertime;
+	int			sk_hashent;
+	struct sock		*sk_pair;
 	/*
 	 * The backlog queue is special, it is always used with
 	 * the per-socket spinlock held and requires low latency
@@ -204,37 +229,37 @@ struct sock {
 	struct {
 		struct sk_buff *head;
 		struct sk_buff *tail;
-	} backlog;
-	rwlock_t		callback_lock;
-	struct sk_buff_head	error_queue;
-	struct proto		*prot;
-	int			err,
-				err_soft;
-	unsigned short		ack_backlog;
-	unsigned short		max_ack_backlog;
-	__u32			priority;
-	unsigned short		type;
-	unsigned char		localroute;
-	unsigned char		protocol;
-	struct ucred		peercred;
-	int			rcvlowat;
-	long			rcvtimeo;
-	long			sndtimeo;
-	struct sk_filter	*filter;
-	void			*protinfo;
-	kmem_cache_t		*slab;
-	struct timer_list	timer;
-	struct timeval		stamp;
-	struct socket		*socket;
-	void			*user_data;
-	struct module		*owner;
-	void			(*state_change)(struct sock *sk);
-	void			(*data_ready)(struct sock *sk, int bytes);
-	void			(*write_space)(struct sock *sk);
-	void			(*error_report)(struct sock *sk);
-	int			(*backlog_rcv) (struct sock *sk,
-						struct sk_buff *skb);
-	void			(*destruct)(struct sock *sk);
+	} sk_backlog;
+	rwlock_t		sk_callback_lock;
+	struct sk_buff_head	sk_error_queue;
+	struct proto		*sk_prot;
+	int			sk_err,
+				sk_err_soft;
+	unsigned short		sk_ack_backlog;
+	unsigned short		sk_max_ack_backlog;
+	__u32			sk_priority;
+	unsigned short		sk_type;
+	unsigned char		sk_localroute;
+	unsigned char		sk_protocol;
+	struct ucred		sk_peercred;
+	int			sk_rcvlowat;
+	long			sk_rcvtimeo;
+	long			sk_sndtimeo;
+	struct sk_filter	*sk_filter;
+	void			*sk_protinfo;
+	kmem_cache_t		*sk_slab;
+	struct timer_list	sk_timer;
+	struct timeval		sk_stamp;
+	struct socket		*sk_socket;
+	void			*sk_user_data;
+	struct module		*sk_owner;
+	void			(*sk_state_change)(struct sock *sk);
+	void			(*sk_data_ready)(struct sock *sk, int bytes);
+	void			(*sk_write_space)(struct sock *sk);
+	void			(*sk_error_report)(struct sock *sk);
+	int			(*sk_backlog_rcv)(struct sock *sk,
+						  struct sk_buff *skb);
+	void			(*sk_destruct)(struct sock *sk);
 };
 
 /* Sock flags */
@@ -250,29 +275,29 @@ enum sock_flags {
 
 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
 {
-	__set_bit(flag, &sk->flags);
+	__set_bit(flag, &sk->sk_flags);
 }
 
 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
 {
-	__clear_bit(flag, &sk->flags);
+	__clear_bit(flag, &sk->sk_flags);
 }
 
 static inline int sock_flag(struct sock *sk, enum sock_flags flag)
 {
-	return test_bit(flag, &sk->flags);
+	return test_bit(flag, &sk->sk_flags);
 }
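The #define aliases at the top of struct sock are what keep sk->sk_family working as an ordinary-looking member access while the storage actually lives inside __sk_common. The same trick in a compilable standalone sketch (model_family is an invented name):

    #include <stdio.h>

    struct sock_common { unsigned short skc_family; };

    struct sock_model {
        struct sock_common __sk_common;
    /* Textual alias: "sk.model_family" below expands to
     * "sk.__sk_common.skc_family", exactly like sk_family above. */
    #define model_family __sk_common.skc_family
        int sk_rcvbuf;
    };

    int main(void)
    {
        struct sock_model sk = { .__sk_common = { .skc_family = 2 } };

        printf("%d\n", sk.model_family);
        return 0;
    }
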
 
 /* The per-socket spinlock must be held here. */
-#define sk_add_backlog(__sk, __skb) \
-do {	if((__sk)->backlog.tail == NULL) { \
-		(__sk)->backlog.head = \
-		(__sk)->backlog.tail = (__skb); \
-	} else { \
-		((__sk)->backlog.tail)->next = (__skb); \
-		(__sk)->backlog.tail = (__skb); \
-	} \
-	(__skb)->next = NULL; \
+#define sk_add_backlog(__sk, __skb) \
+do {	if (!(__sk)->sk_backlog.tail) { \
+		(__sk)->sk_backlog.head = \
+		(__sk)->sk_backlog.tail = (__skb); \
+	} else { \
+		((__sk)->sk_backlog.tail)->next = (__skb); \
+		(__sk)->sk_backlog.tail = (__skb); \
+	} \
+	(__skb)->next = NULL; \
 } while(0)
 
 /* IP protocol blocks we attach to sockets.
@@ -337,9 +362,9 @@ static __inline__ void sk_set_owner(struct sock *sk, struct module *owner)
 	 * change the ownership of this struct sock, with one not needed
 	 * transient sk_set_owner call.
 	 */
-	if (unlikely(sk->owner != NULL))
+	if (unlikely(sk->sk_owner != NULL))
 		BUG();
-	sk->owner = owner;
+	sk->sk_owner = owner;
 	__module_get(owner);
 }
 
@@ -423,28 +448,29 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
  */
 extern void __lock_sock(struct sock *sk);
 extern void __release_sock(struct sock *sk);
-#define sock_owned_by_user(sk)	(NULL != (sk)->lock.owner)
+#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
 
 #define lock_sock(__sk) \
 do {	might_sleep(); \
-	spin_lock_bh(&((__sk)->lock.slock)); \
-	if ((__sk)->lock.owner != NULL) \
+	spin_lock_bh(&((__sk)->sk_lock.slock)); \
+	if ((__sk)->sk_lock.owner) \
 		__lock_sock(__sk); \
-	(__sk)->lock.owner = (void *)1; \
-	spin_unlock_bh(&((__sk)->lock.slock)); \
+	(__sk)->sk_lock.owner = (void *)1; \
+	spin_unlock_bh(&((__sk)->sk_lock.slock)); \
 } while(0)
 
 #define release_sock(__sk) \
-do {	spin_lock_bh(&((__sk)->lock.slock)); \
-	if ((__sk)->backlog.tail != NULL) \
+do {	spin_lock_bh(&((__sk)->sk_lock.slock)); \
+	if ((__sk)->sk_backlog.tail) \
 		__release_sock(__sk); \
-	(__sk)->lock.owner = NULL; \
-	if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
-	spin_unlock_bh(&((__sk)->lock.slock)); \
+	(__sk)->sk_lock.owner = NULL; \
+	if (waitqueue_active(&((__sk)->sk_lock.wq))) \
+		wake_up(&((__sk)->sk_lock.wq)); \
+	spin_unlock_bh(&((__sk)->sk_lock.slock)); \
 } while(0)
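lock_sock()/release_sock() above implement an "owner" hand-off on top of the renamed sk_lock spinlock: process context marks sk_lock.owner, and bottom-half context, seeing an owner, defers the packet to sk_backlog instead of touching the socket. A rough pthread model of just the owner check (the backlog itself is elided):

    #include <pthread.h>
    #include <stdio.h>

    /* Spinlock-protected "owner" flag: while a process-context path owns
     * the socket, a softirq path must defer its work. */
    struct lock_model {
        pthread_mutex_t slock;   /* stands in for sk_lock.slock */
        int             owned;   /* stands in for sk_lock.owner */
    };

    static int bh_would_defer(struct lock_model *l)
    {
        int deferred;

        pthread_mutex_lock(&l->slock);
        deferred = l->owned;     /* owner present: kernel would backlog */
        pthread_mutex_unlock(&l->slock);
        return deferred;
    }

    static void set_owned(struct lock_model *l, int v)
    {
        pthread_mutex_lock(&l->slock);
        l->owned = v;
        pthread_mutex_unlock(&l->slock);
    }

    int main(void)
    {
        struct lock_model l = { PTHREAD_MUTEX_INITIALIZER, 0 };

        set_owned(&l, 1);                              /* lock_sock()    */
        printf("deferred: %d\n", bh_would_defer(&l));  /* 1              */
        set_owned(&l, 0);                              /* release_sock() */
        printf("deferred: %d\n", bh_would_defer(&l));  /* 0              */
        return 0;
    }
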
 
 /* BH context may only use the following locking interface. */
-#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
-#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))
+#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
+#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
 extern struct sock *		sk_alloc(int family, int priority, int zero_it,
 					 kmem_cache_t *slab);
@@ -547,13 +573,13 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
 	if (err)
 		return err;
 
-	if (sk->filter) {
+	if (sk->sk_filter) {
 		struct sk_filter *filter;
 
 		if (needlock)
 			bh_lock_sock(sk);
 
-		filter = sk->filter;
+		filter = sk->sk_filter;
 		if (filter) {
 			int pkt_len = sk_run_filter(skb, filter->insns,
 						    filter->len);
@@ -581,7 +607,7 @@ static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
 {
 	unsigned int size = sk_filter_len(fp);
 
-	atomic_sub(size, &sk->omem_alloc);
+	atomic_sub(size, &sk->sk_omem_alloc);
 
 	if (atomic_dec_and_test(&fp->refcnt))
 		kfree(fp);
@@ -590,7 +616,7 @@ static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
 	atomic_inc(&fp->refcnt);
-	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
+	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
 }
 
 /*
@@ -626,7 +652,7 @@ static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 
 static inline void sock_hold(struct sock *sk)
 {
-	atomic_inc(&sk->refcnt);
+	atomic_inc(&sk->sk_refcnt);
 }
 
 /* Ungrab socket in the context, which assumes that socket refcnt
@@ -634,13 +660,13 @@ static inline void sock_hold(struct sock *sk)
  */
 static inline void __sock_put(struct sock *sk)
 {
-	atomic_dec(&sk->refcnt);
+	atomic_dec(&sk->sk_refcnt);
 }
 
 /* Ungrab socket and destroy it, if it was the last reference. */
 static inline void sock_put(struct sock *sk)
 {
-	if (atomic_dec_and_test(&sk->refcnt))
+	if (atomic_dec_and_test(&sk->sk_refcnt))
 		sk_free(sk);
 }
 
@@ -653,29 +679,29 @@ static inline void sock_put(struct sock *sk)
  */
 static inline void sock_orphan(struct sock *sk)
 {
-	write_lock_bh(&sk->callback_lock);
+	write_lock_bh(&sk->sk_callback_lock);
 	sock_set_flag(sk, SOCK_DEAD);
-	sk->socket = NULL;
-	sk->sleep = NULL;
-	write_unlock_bh(&sk->callback_lock);
+	sk->sk_socket = NULL;
+	sk->sk_sleep = NULL;
+	write_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void sock_graft(struct sock *sk, struct socket *parent)
 {
-	write_lock_bh(&sk->callback_lock);
-	sk->sleep = &parent->wait;
+	write_lock_bh(&sk->sk_callback_lock);
+	sk->sk_sleep = &parent->wait;
 	parent->sk = sk;
-	sk->socket = parent;
-	write_unlock_bh(&sk->callback_lock);
+	sk->sk_socket = parent;
+	write_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline int sock_i_uid(struct sock *sk)
 {
 	int uid;
 
-	read_lock(&sk->callback_lock);
-	uid = sk->socket ? SOCK_INODE(sk->socket)->i_uid : 0;
-	read_unlock(&sk->callback_lock);
+	read_lock(&sk->sk_callback_lock);
+	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+	read_unlock(&sk->sk_callback_lock);
 	return uid;
 }
 
@@ -683,16 +709,16 @@ static inline unsigned long sock_i_ino(struct sock *sk)
 {
 	unsigned long ino;
 
-	read_lock(&sk->callback_lock);
-	ino = sk->socket ? SOCK_INODE(sk->socket)->i_ino : 0;
-	read_unlock(&sk->callback_lock);
+	read_lock(&sk->sk_callback_lock);
+	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+	read_unlock(&sk->sk_callback_lock);
 	return ino;
 }
 
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
-	return sk->dst_cache;
+	return sk->sk_dst_cache;
 }
 
 static inline struct dst_entry *
@@ -700,11 +726,11 @@ sk_dst_get(struct sock *sk)
 {
 	struct dst_entry *dst;
 
-	read_lock(&sk->dst_lock);
-	dst = sk->dst_cache;
+	read_lock(&sk->sk_dst_lock);
+	dst = sk->sk_dst_cache;
 	if (dst)
 		dst_hold(dst);
-	read_unlock(&sk->dst_lock);
+	read_unlock(&sk->sk_dst_lock);
 	return dst;
 }
 
@@ -713,17 +739,17 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old_dst;
 
-	old_dst = sk->dst_cache;
-	sk->dst_cache = dst;
+	old_dst = sk->sk_dst_cache;
+	sk->sk_dst_cache = dst;
 	dst_release(old_dst);
 }
 
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-	write_lock(&sk->dst_lock);
+	write_lock(&sk->sk_dst_lock);
 	__sk_dst_set(sk, dst);
-	write_unlock(&sk->dst_lock);
+	write_unlock(&sk->sk_dst_lock);
 }
 
 static inline void
@@ -731,26 +757,26 @@ __sk_dst_reset(struct sock *sk)
 {
 	struct dst_entry *old_dst;
 
-	old_dst = sk->dst_cache;
-	sk->dst_cache = NULL;
+	old_dst = sk->sk_dst_cache;
+	sk->sk_dst_cache = NULL;
 	dst_release(old_dst);
 }
 
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-	write_lock(&sk->dst_lock);
+	write_lock(&sk->sk_dst_lock);
 	__sk_dst_reset(sk);
-	write_unlock(&sk->dst_lock);
+	write_unlock(&sk->sk_dst_lock);
 }
 
 static inline struct dst_entry *
 __sk_dst_check(struct sock *sk, u32 cookie)
 {
-	struct dst_entry *dst = sk->dst_cache;
+	struct dst_entry *dst = sk->sk_dst_cache;
 
 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		sk->dst_cache = NULL;
+		sk->sk_dst_cache = NULL;
 		return NULL;
 	}
 
@@ -785,14 +811,14 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 	sock_hold(sk);
 	skb->sk = sk;
 	skb->destructor = sock_wfree;
-	atomic_add(skb->truesize, &sk->wmem_alloc);
+	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 }
 
 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
 	skb->sk = sk;
 	skb->destructor = sock_rfree;
-	atomic_add(skb->truesize, &sk->rmem_alloc);
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 }
 
 static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -802,7 +828,8 @@ static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
 	 */
-	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -817,9 +844,9 @@ static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
 
-	skb_queue_tail(&sk->receive_queue, skb);
+	skb_queue_tail(&sk->sk_receive_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->data_ready(sk,skb->len);
+		sk->sk_data_ready(sk, skb->len);
 out:
 	return err;
 }
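sock_queue_rcv_skb() above admits a packet only while committed receive memory plus the packet's truesize stays under sk_rcvbuf, then charges the accounting through skb_set_owner_r(). The admission test and charge, modeled standalone (the kernel returns -ENOMEM; the model returns -1):

    #include <stdio.h>

    struct sock_model { unsigned int sk_rmem_alloc, sk_rcvbuf; };

    static int queue_rcv(struct sock_model *sk, unsigned int truesize)
    {
        if (sk->sk_rmem_alloc + truesize >= sk->sk_rcvbuf)
            return -1;                    /* over budget: drop */
        sk->sk_rmem_alloc += truesize;    /* skb_set_owner_r()'s side effect */
        return 0;
    }

    int main(void)
    {
        struct sock_model sk = { 0, 4096 };

        printf("%d %d\n", queue_rcv(&sk, 2048), queue_rcv(&sk, 2048)); /* 0 -1 */
        return 0;
    }
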
 
 static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
 	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
 	 */
-	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
 		return -ENOMEM;
 	skb_set_owner_r(skb, sk);
-	skb_queue_tail(&sk->error_queue,skb);
+	skb_queue_tail(&sk->sk_error_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->data_ready(sk,skb->len);
+		sk->sk_data_ready(sk, skb->len);
 	return 0;
 }
@@ -844,7 +872,7 @@ static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
 static inline int sock_error(struct sock *sk)
 {
-	int err=xchg(&sk->err,0);
+	int err = xchg(&sk->sk_err, 0);
 	return -err;
 }
 
@@ -852,8 +880,8 @@ static inline unsigned long sock_wspace(struct sock *sk)
 {
 	int amt = 0;
 
-	if (!(sk->shutdown & SEND_SHUTDOWN)) {
-		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
 		if (amt < 0)
 			amt = 0;
 	}
@@ -862,8 +890,8 @@ static inline unsigned long sock_wspace(struct sock *sk)
 
 static inline void sk_wake_async(struct sock *sk, int how, int band)
 {
-	if (sk->socket && sk->socket->fasync_list)
-		sock_wake_async(sk->socket, how, band);
+	if (sk->sk_socket && sk->sk_socket->fasync_list)
+		sock_wake_async(sk->sk_socket, how, band);
 }
 
 #define SOCK_MIN_SNDBUF 2048
@@ -874,7 +902,7 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
  */
 static inline int sock_writeable(struct sock *sk)
 {
-	return atomic_read(&sk->wmem_alloc) < (sk->sndbuf / 2);
+	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
 }
 
 static inline int gfp_any(void)
@@ -884,17 +912,17 @@ static inline int gfp_any(void)
 
 static inline long sock_rcvtimeo(struct sock *sk, int noblock)
 {
-	return noblock ? 0 : sk->rcvtimeo;
+	return noblock ? 0 : sk->sk_rcvtimeo;
 }
 
 static inline long sock_sndtimeo(struct sock *sk, int noblock)
 {
-	return noblock ? 0 : sk->sndtimeo;
+	return noblock ? 0 : sk->sk_sndtimeo;
 }
 
 static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
 {
-	return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
+	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
 }
 
 /* Alas, with timeout socket operations are not restartable.
@@ -908,10 +936,10 @@ static inline int sock_intr_errno(long timeo)
 static __inline__ void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 {
-	if (sk->rcvtstamp)
+	if (sk->sk_rcvtstamp)
 		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp),
 			 &skb->stamp);
 	else
-		sk->stamp = skb->stamp;
+		sk->sk_stamp = skb->stamp;
 }
 
 /*
@@ -940,11 +968,11 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 #define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
 				DECLARE_WAITQUEUE(wait, tsk); \
 				tsk->state = TASK_INTERRUPTIBLE; \
-				add_wait_queue((sk)->sleep, &wait); \
+				add_wait_queue((sk)->sk_sleep, &wait); \
 				release_sock(sk);
 
 #define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
-				remove_wait_queue((sk)->sleep, &wait); \
+				remove_wait_queue((sk)->sk_sleep, &wait); \
 				lock_sock(sk); \
 				}
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 10d509e3ecf0..14530760241e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -54,7 +54,7 @@ struct tcp_ehash_bucket {
  *
  * 1) Sockets bound to different interfaces may share a local port.
  *    Failing that, goto test 2.
- * 2) If all sockets have sk->reuse set, and none of them are in
+ * 2) If all sockets have sk->sk_reuse set, and none of them are in
  *    TCP_LISTEN state, the port may be shared.
  *    Failing that, goto test 3.
  * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
@@ -65,12 +65,12 @@ struct tcp_ehash_bucket {
  * The interesting point, is test #2.  This is what an FTP server does
  * all day.  To optimize this case we use a specific flag bit defined
  * below.  As we add sockets to a bind bucket list, we perform a
- * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
+ * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
  * As long as all sockets added to a bind bucket pass this test,
  * the flag bit will be set.
  * The resulting situation is that tcp_v[46]_verify_bind() can just check
  * for this flag bit, if it is set and the socket trying to bind has
- * sk->reuse set, we don't even have to walk the owners list at all,
+ * sk->sk_reuse set, we don't even have to walk the owners list at all,
  * we return that it is ok to bind this socket to the requested local port.
  *
  * Sounds like a lot of work, but it is worth it.  In a more naive
@@ -97,7 +97,7 @@ extern struct tcp_hashinfo {
 	/* This is for sockets with full identity only.  Sockets here will
 	 * always be without wildcards and will have the following invariant:
 	 *
-	 *          TCP_ESTABLISHED <= sk->state < TCP_CLOSE
+	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
 	 *
 	 * First half of the table is for sockets not in TIME_WAIT, second half
 	 * is for TIME_WAIT sockets only.
@@ -165,46 +165,45 @@ extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
  * without violating the protocol specification.
 */
 struct tcp_tw_bucket {
-	/* These _must_ match the beginning of struct sock precisely.
-	 * XXX Yes I know this is gross, but I'd have to edit every single
-	 * XXX networking file if I created a "struct sock_header". -DaveM
+	/*
+	 * Now struct sock also uses sock_common, so please just
+	 * don't add nothing before this first member (__tw_common) --acme
 	 */
-	unsigned short		family;
-	volatile unsigned char	state;		/* Connection state */
-	unsigned char		reuse;		/* SO_REUSEADDR setting */
-	int			bound_dev_if;
-	/* Main hash linkage for various protocol lookup tables. */
-	struct sock		*next;
-	struct sock		**pprev;
-	struct sock		*bind_next;
-	struct sock		**bind_pprev;
-	atomic_t		refcnt;
-	/* End of struct sock/struct tcp_tw_bucket shared layout */
-	volatile unsigned char	substate;
-	unsigned char		rcv_wscale;
-	__u16			sport;
+	struct sock_common	__tw_common;
+#define tw_family		__tw_common.skc_family
+#define tw_state		__tw_common.skc_state
+#define tw_reuse		__tw_common.skc_reuse
+#define tw_bound_dev_if		__tw_common.skc_bound_dev_if
+#define tw_next			__tw_common.skc_next
+#define tw_pprev		__tw_common.skc_pprev
+#define tw_bind_next		__tw_common.skc_bind_next
+#define tw_bind_pprev		__tw_common.skc_bind_pprev
+#define tw_refcnt		__tw_common.skc_refcnt
+	volatile unsigned char	tw_substate;
+	unsigned char		tw_rcv_wscale;
+	__u16			tw_sport;
 	/* Socket demultiplex comparisons on incoming packets. */
 	/* these five are in inet_opt */
-	__u32			daddr;
-	__u32			rcv_saddr;
-	__u16			dport;
-	__u16			num;
+	__u32			tw_daddr;
+	__u32			tw_rcv_saddr;
+	__u16			tw_dport;
+	__u16			tw_num;
 	/* And these are ours. */
-	int			hashent;
-	int			timeout;
-	__u32			rcv_nxt;
-	__u32			snd_nxt;
-	__u32			rcv_wnd;
-	__u32			ts_recent;
-	long			ts_recent_stamp;
-	unsigned long		ttd;
-	struct tcp_bind_bucket	*tb;
-	struct tcp_tw_bucket	*next_death;
-	struct tcp_tw_bucket	**pprev_death;
+	int			tw_hashent;
+	int			tw_timeout;
+	__u32			tw_rcv_nxt;
+	__u32			tw_snd_nxt;
+	__u32			tw_rcv_wnd;
+	__u32			tw_ts_recent;
+	long			tw_ts_recent_stamp;
+	unsigned long		tw_ttd;
+	struct tcp_bind_bucket	*tw_tb;
+	struct tcp_tw_bucket	*tw_next_death;
+	struct tcp_tw_bucket	**tw_pprev_death;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-	struct in6_addr		v6_daddr;
-	struct in6_addr		v6_rcv_saddr;
+	struct in6_addr		tw_v6_daddr;
+	struct in6_addr		tw_v6_rcv_saddr;
 #endif
 };
 
@@ -214,7 +213,7 @@ extern kmem_cache_t *tcp_timewait_cachep;
 
 static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
 {
-	if (atomic_dec_and_test(&tw->refcnt)) {
+	if (atomic_dec_and_test(&tw->tw_refcnt)) {
 #ifdef INET_REFCNT_DEBUG
 		printk(KERN_DEBUG "tw_bucket %p released\n", tw);
 #endif
@@ -249,31 +248,31 @@ extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
 	(((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
 	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
-	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
 
 #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
-	(((*((__u64 *)&(tcptw_sk(__sk)->daddr))) == (__cookie))	&&	\
-	 ((*((__u32 *)&(tcptw_sk(__sk)->dport))) == (__ports))	&&	\
-	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+	(((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) &&	\
+	 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) &&	\
+	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
 #else /* 32-bit arch */
 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
 	((inet_sk(__sk)->daddr		== (__saddr))	&&		\
 	 (inet_sk(__sk)->rcv_saddr	== (__daddr))	&&		\
 	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
-	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
 
 #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
-	((tcptw_sk(__sk)->daddr		== (__saddr))	&&		\
-	 (tcptw_sk(__sk)->rcv_saddr	== (__daddr))	&&		\
-	 ((*((__u32 *)&(tcptw_sk(__sk)->dport))) == (__ports))	&&	\
-	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+	((tcptw_sk(__sk)->tw_daddr	== (__saddr))	&&		\
+	 (tcptw_sk(__sk)->tw_rcv_saddr	== (__daddr))	&&		\
+	 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports))  &&	\
+	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
 #endif /* 64-bit arch */
 
 #define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)	   \
 	(((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))   && \
-	 ((__sk)->family		== AF_INET6)		&& \
+	 ((__sk)->sk_family		== AF_INET6)		&& \
	 !ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr))	&& \
	 !ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr))	&& \
-	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
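tcp_tw_put() above (like sock_put() earlier in net/sock.h) is the standard last-reference-frees idiom, now on the renamed tw_refcnt. Modeled standalone with a plain int in place of atomic_dec_and_test():

    #include <stdio.h>
    #include <stdlib.h>

    struct tw_model { int tw_refcnt; };

    /* Whoever drops the last reference frees the object. */
    static void tw_put(struct tw_model *tw)
    {
        if (--tw->tw_refcnt == 0) {
            printf("tw_bucket %p released\n", (void *)tw);
            free(tw);
        }
    }

    int main(void)
    {
        struct tw_model *tw = malloc(sizeof(*tw));

        if (!tw)
            return 1;
        tw->tw_refcnt = 2;
        tw_put(tw);    /* still held */
        tw_put(tw);    /* last reference: freed */
        return 0;
    }
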
 
 /* These can have wildcards, don't try too hard. */
 static __inline__ int tcp_lhashfn(unsigned short num)
@@ -932,7 +931,8 @@ static __inline__ unsigned int tcp_current_mss(struct sock *sk, int large)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 	struct dst_entry *dst = __sk_dst_get(sk);
-	int mss_now = large && (sk->route_caps&NETIF_F_TSO) && !tp->urg_mode ?
+	int mss_now = large && (sk->sk_route_caps & NETIF_F_TSO) &&
+		      !tp->urg_mode ?
 		      tp->mss_cache : tp->mss_cache_std;
 
 	if (dst) {
@@ -983,7 +983,7 @@ static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
 {
 	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
 	    tp->rcv_wnd &&
-	    atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
+	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 	    !tp->urg_data)
 		tcp_fast_path_on(tp);
 }
@@ -1066,9 +1066,9 @@ struct tcp_skb_cb {
 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 
 #define for_retrans_queue(skb, sk, tp) \
-		for (skb = (sk)->write_queue.next;			\
+		for (skb = (sk)->sk_write_queue.next;			\
 		     (skb != (tp)->send_head) &&			\
-		     (skb != (struct sk_buff *)&(sk)->write_queue);	\
+		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
 		     skb=skb->next)
 
@@ -1080,12 +1080,12 @@ struct tcp_skb_cb {
  */
 static inline int tcp_min_write_space(struct sock *sk)
 {
-	return sk->wmem_queued/2;
+	return sk->sk_wmem_queued / 2;
 }
 
 static inline int tcp_wspace(struct sock *sk)
 {
-	return sk->sndbuf - sk->wmem_queued;
+	return sk->sk_sndbuf - sk->sk_wmem_queued;
 }
 
@@ -1267,7 +1267,7 @@ static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
 
 static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
 {
-	return (skb->next == (struct sk_buff*)&sk->write_queue);
+	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
 }
 
 /* Push out any pending frames which were held back due to
@@ -1365,19 +1365,19 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
 		__skb_queue_tail(&tp->ucopy.prequeue, skb);
 		tp->ucopy.memory += skb->truesize;
-		if (tp->ucopy.memory > sk->rcvbuf) {
+		if (tp->ucopy.memory > sk->sk_rcvbuf) {
 			struct sk_buff *skb1;
 
 			if (sock_owned_by_user(sk))
 				BUG();
 
 			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-				sk->backlog_rcv(sk, skb1);
+				sk->sk_backlog_rcv(sk, skb1);
 				NET_INC_STATS_BH(TCPPrequeueDropped);
 			}
 
 			tp->ucopy.memory = 0;
 		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-			wake_up_interruptible(sk->sleep);
+			wake_up_interruptible(sk->sk_sleep);
 			if (!tcp_ack_scheduled(tp))
 				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
 		}
@@ -1399,7 +1399,7 @@ static char *statename[]={
 
 static __inline__ void tcp_set_state(struct sock *sk, int state)
 {
-	int oldstate = sk->state;
+	int oldstate = sk->sk_state;
 
 	switch (state) {
 	case TCP_ESTABLISHED:
@@ -1411,8 +1411,8 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
 			TCP_INC_STATS(TcpEstabResets);
 
-		sk->prot->unhash(sk);
-		if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
+		sk->sk_prot->unhash(sk);
+		if (sk->sk_prev && !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
 			tcp_put_port(sk);
 		/* fall through */
 	default:
@@ -1423,7 +1423,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 	/* Change state AFTER socket is unhashed to avoid closed
 	 * socket sitting in hash tables.
 	 */
-	sk->state = state;
+	sk->sk_state = state;
 
 #ifdef STATE_TRACE
 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
@@ -1435,10 +1435,10 @@ static __inline__ void tcp_done(struct sock *sk)
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
 
-	sk->shutdown = SHUTDOWN_MASK;
+	sk->sk_shutdown = SHUTDOWN_MASK;
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->state_change(sk);
+		sk->sk_state_change(sk);
 	else
 		tcp_destroy_sock(sk);
 }
@@ -1588,27 +1588,28 @@ static inline int tcp_win_from_space(int space)
 /* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(struct sock *sk)
 {
-	return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
+	return tcp_win_from_space(sk->sk_rcvbuf -
+				  atomic_read(&sk->sk_rmem_alloc));
 }
 
 static inline int tcp_full_space( struct sock *sk)
 {
-	return tcp_win_from_space(sk->rcvbuf);
+	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
 static inline void tcp_acceptq_removed(struct sock *sk)
 {
-	sk->ack_backlog--;
+	sk->sk_ack_backlog--;
 }
 
 static inline void tcp_acceptq_added(struct sock *sk)
 {
-	sk->ack_backlog++;
+	sk->sk_ack_backlog++;
 }
 
 static inline int tcp_acceptq_is_full(struct sock *sk)
 {
-	return sk->ack_backlog > sk->max_ack_backlog;
+	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
 }
 
 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
@@ -1711,15 +1712,15 @@ static __inline__ void tcp_openreq_init(struct open_request *req,
 
 static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
 {
 	tcp_sk(sk)->queue_shrunk = 1;
-	sk->wmem_queued -= skb->truesize;
-	sk->forward_alloc += skb->truesize;
+	sk->sk_wmem_queued -= skb->truesize;
+	sk->sk_forward_alloc += skb->truesize;
 	__kfree_skb(skb);
 }
 
 static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
 {
-	sk->wmem_queued += skb->truesize;
-	sk->forward_alloc -= skb->truesize;
+	sk->sk_wmem_queued += skb->truesize;
+	sk->sk_forward_alloc -= skb->truesize;
 }
 
 extern void __tcp_mem_reclaim(struct sock *sk);
@@ -1727,7 +1728,7 @@ extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
 
 static inline void tcp_mem_reclaim(struct sock *sk)
 {
-	if (sk->forward_alloc >= TCP_MEM_QUANTUM)
+	if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM)
 		__tcp_mem_reclaim(sk);
 }
 
@@ -1741,9 +1742,9 @@ static inline void tcp_enter_memory_pressure(void)
 
 static inline void tcp_moderate_sndbuf(struct sock *sk)
 {
-	if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
-		sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
-		sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
+	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
 	}
 }
 
@@ -1753,7 +1754,7 @@ static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem,
 
 	if (skb) {
 		skb->truesize += mem;
-		if (sk->forward_alloc >= (int)skb->truesize ||
+		if (sk->sk_forward_alloc >= (int)skb->truesize ||
 		    tcp_mem_schedule(sk, skb->truesize, 0)) {
 			skb_reserve(skb, MAX_TCP_HEADER);
 			return skb;
@@ -1773,9 +1774,9 @@ static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
 
 static inline struct page * tcp_alloc_page(struct sock *sk)
 {
-	if (sk->forward_alloc >= (int)PAGE_SIZE ||
+	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
 	    tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
-		struct page *page = alloc_pages(sk->allocation, 0);
+		struct page *page = alloc_pages(sk->sk_allocation, 0);
 		if (page)
 			return page;
 	}
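tcp_free_skb()/tcp_charge_skb() above move skb->truesize between the renamed sk_wmem_queued and sk_forward_alloc counters in opposite directions, and tcp_mem_reclaim() returns the reservation once it exceeds TCP_MEM_QUANTUM. The two-counter bookkeeping, modeled standalone:

    #include <stdio.h>

    struct sock_model { int sk_wmem_queued, sk_forward_alloc; };

    /* Charging a queued skb consumes pre-reserved ("forward allocated")
     * bytes; freeing it gives them back. */
    static void charge(struct sock_model *sk, int truesize)
    {
        sk->sk_wmem_queued   += truesize;
        sk->sk_forward_alloc -= truesize;
    }

    static void release(struct sock_model *sk, int truesize)
    {
        sk->sk_wmem_queued   -= truesize;
        sk->sk_forward_alloc += truesize;
    }

    int main(void)
    {
        struct sock_model sk = { 0, 4096 };

        charge(&sk, 1500);
        release(&sk, 1500);
        printf("%d %d\n", sk.sk_wmem_queued, sk.sk_forward_alloc); /* 0 4096 */
        return 0;
    }
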
@@ -1788,7 +1789,7 @@ static inline void tcp_writequeue_purge(struct sock *sk)
 {
 	struct sk_buff *skb;
 
-	while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
+	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
 		tcp_free_skb(sk, skb);
 	tcp_mem_reclaim(sk);
 }
@@ -1799,8 +1800,8 @@ static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
 	skb->sk = sk;
 	skb->destructor = tcp_rfree;
-	atomic_add(skb->truesize, &sk->rmem_alloc);
-	sk->forward_alloc -= skb->truesize;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+	sk->sk_forward_alloc -= skb->truesize;
 }
 
 extern void tcp_listen_wlock(void);
@@ -1870,10 +1871,10 @@ static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
 
 static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
-	sk->route_caps = dst->dev->features;
-	if (sk->route_caps & NETIF_F_TSO) {
-		if (sk->no_largesend || dst->header_len)
-			sk->route_caps &= ~NETIF_F_TSO;
+	sk->sk_route_caps = dst->dev->features;
+	if (sk->sk_route_caps & NETIF_F_TSO) {
+		if (sk->sk_no_largesend || dst->header_len)
+			sk->sk_route_caps &= ~NETIF_F_TSO;
 	}
 }
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index 3785af8bbe74..06105757262d 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -31,10 +31,10 @@ static __inline__ void
 TCP_ECN_send_syn(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
 {
 	tp->ecn_flags = 0;
-	if (sysctl_tcp_ecn && !(sk->route_caps&NETIF_F_TSO)) {
+	if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) {
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
 		tp->ecn_flags = TCP_ECN_OK;
-		sk->no_largesend = 1;
+		sk->sk_no_largesend = 1;
 	}
 }
diff --git a/include/net/udp.h b/include/net/udp.h
index 766330ad75b0..fd43c169bcbe 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -43,10 +43,9 @@ static inline int udp_lport_inuse(u16 num)
 {
 	struct sock *sk = udp_hash[num & (UDP_HTABLE_SIZE - 1)];
 
-	for(; sk != NULL; sk = sk->next) {
+	for (; sk; sk = sk->sk_next)
 		if (inet_sk(sk)->num == num)
 			return 1;
-	}
 	return 0;
 }
diff --git a/include/net/x25.h b/include/net/x25.h
index 98fb713f3121..9fa00b961475 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -149,7 +149,7 @@ struct x25_opt {
 	unsigned long		vc_facil_mask;	/* inc_call facilities mask */
 };
 
-#define x25_sk(__sk) ((struct x25_opt *)(__sk)->protinfo)
+#define x25_sk(__sk) ((struct x25_opt *)(__sk)->sk_protinfo)
 
 /* af_x25.c */
 extern int sysctl_x25_restart_request_timeout;
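udp_lport_inuse() above is a plain walk of one hash bucket's singly linked sk_next chain. The same walk as a standalone model:

    #include <stdio.h>

    struct sock_model { unsigned short num; struct sock_model *sk_next; };

    /* Walk the bucket's chain looking for a socket bound to this port. */
    static int lport_inuse(const struct sock_model *bucket, unsigned short num)
    {
        const struct sock_model *sk;

        for (sk = bucket; sk; sk = sk->sk_next)
            if (sk->num == num)
                return 1;
        return 0;
    }

    int main(void)
    {
        struct sock_model b = { 68, NULL }, a = { 67, &b };

        printf("%d %d\n", lport_inuse(&a, 68), lport_inuse(&a, 53)); /* 1 0 */
        return 0;
    }
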
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1a00ffc2e363..21542c6a1f17 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -78,7 +78,7 @@ extern struct semaphore xfrm_cfg_sem;
    We add genid to each dst plus pointer to genid of raw IP route,
    pmtu disc will update pmtu on raw IP route and increase its genid.
    dst_check() will see this for top level and trigger resyncing
-   metrics. Plus, it will be made via sk->dst_cache. Solved.
+   metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
 */
 
 /* Full description of state of transformer. */
@@ -586,7 +586,7 @@ extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
 
 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
 {
-	if (sk && sk->policy[XFRM_POLICY_IN])
+	if (sk && sk->sk_policy[XFRM_POLICY_IN])
 		return __xfrm_policy_check(sk, dir, skb, family);
 
 	return	!xfrm_policy_list[dir] ||
@@ -628,7 +628,7 @@ extern int __xfrm_sk_clone_policy(struct sock *sk);
 
 static inline int xfrm_sk_clone_policy(struct sock *sk)
 {
-	if (unlikely(sk->policy[0] || sk->policy[1]))
+	if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
 		return __xfrm_sk_clone_policy(sk);
 	return 0;
 }
@@ -637,13 +637,13 @@ extern void __xfrm_sk_free_policy(struct xfrm_policy *, int dir);
 
 static inline void xfrm_sk_free_policy(struct sock *sk)
 {
-	if (unlikely(sk->policy[0] != NULL)) {
-		__xfrm_sk_free_policy(sk->policy[0], 0);
-		sk->policy[0] = NULL;
+	if (unlikely(sk->sk_policy[0] != NULL)) {
+		__xfrm_sk_free_policy(sk->sk_policy[0], 0);
+		sk->sk_policy[0] = NULL;
 	}
-	if (unlikely(sk->policy[1] != NULL)) {
-		__xfrm_sk_free_policy(sk->policy[1], 1);
-		sk->policy[1] = NULL;
+	if (unlikely(sk->sk_policy[1] != NULL)) {
+		__xfrm_sk_free_policy(sk->sk_policy[1], 1);
+		sk->sk_policy[1] = NULL;
 	}
 }
