-rw-r--r--  drivers/atm/he.c  2
-rw-r--r--  drivers/atm/idt77252.c  5
-rw-r--r--  drivers/atm/iphase.c  8
-rw-r--r--  drivers/net/pppoe.c  61
-rw-r--r--  drivers/net/pppox.c  13
-rw-r--r--  fs/ncpfs/inode.c  24
-rw-r--r--  fs/ncpfs/sock.c  8
-rw-r--r--  include/linux/atalk.h  2
-rw-r--r--  include/linux/atmdev.h  9
-rw-r--r--  include/linux/if_ec.h  2
-rw-r--r--  include/linux/if_pppox.h  2
-rw-r--r--  include/linux/if_wanpipe.h  4
-rw-r--r--  include/linux/ipv6.h  2
-rw-r--r--  include/linux/rtnetlink.h  4
-rw-r--r--  include/net/af_unix.h  4
-rw-r--r--  include/net/ax25.h  2
-rw-r--r--  include/net/bluetooth/hci_core.h  2
-rw-r--r--  include/net/bluetooth/l2cap.h  2
-rw-r--r--  include/net/bluetooth/rfcomm.h  2
-rw-r--r--  include/net/bluetooth/sco.h  2
-rw-r--r--  include/net/dn.h  2
-rw-r--r--  include/net/dn_nsp.h  2
-rw-r--r--  include/net/ip.h  2
-rw-r--r--  include/net/ip6_route.h  4
-rw-r--r--  include/net/ipx.h  2
-rw-r--r--  include/net/irda/af_irda.h  2
-rw-r--r--  include/net/llc_c_ev.h  4
-rw-r--r--  include/net/llc_conn.h  2
-rw-r--r--  include/net/netrom.h  2
-rw-r--r--  include/net/rose.h  2
-rw-r--r--  include/net/route.h  2
-rw-r--r--  include/net/sctp/sctp.h  2
-rw-r--r--  include/net/sock.h  480
-rw-r--r--  include/net/tcp.h  179
-rw-r--r--  include/net/tcp_ecn.h  4
-rw-r--r--  include/net/udp.h  3
-rw-r--r--  include/net/x25.h  2
-rw-r--r--  include/net/xfrm.h  18
-rw-r--r--  net/appletalk/aarp.c  2
-rw-r--r--  net/appletalk/atalk_proc.c  11
-rw-r--r--  net/appletalk/ddp.c  103
-rw-r--r--  net/atm/atm_misc.c  8
-rw-r--r--  net/atm/br2684.c  4
-rw-r--r--  net/atm/clip.c  10
-rw-r--r--  net/atm/common.c  44
-rw-r--r--  net/atm/lec.c  18
-rw-r--r--  net/atm/mpc.c  13
-rw-r--r--  net/atm/pppoatm.c  2
-rw-r--r--  net/atm/proc.c  15
-rw-r--r--  net/atm/raw.c  7
-rw-r--r--  net/atm/signaling.c  17
-rw-r--r--  net/atm/svc.c  10
-rw-r--r--  net/ax25/af_ax25.c  170
-rw-r--r--  net/ax25/ax25_ds_in.c  4
-rw-r--r--  net/ax25/ax25_ds_timer.c  13
-rw-r--r--  net/ax25/ax25_in.c  18
-rw-r--r--  net/ax25/ax25_route.c  2
-rw-r--r--  net/ax25/ax25_std_in.c  4
-rw-r--r--  net/ax25/ax25_std_timer.c  13
-rw-r--r--  net/ax25/ax25_subr.c  8
-rw-r--r--  net/bluetooth/af_bluetooth.c  52
-rw-r--r--  net/bluetooth/bnep/core.c  14
-rw-r--r--  net/bluetooth/bnep/netdev.c  12
-rw-r--r--  net/bluetooth/bnep/sock.c  6
-rw-r--r--  net/bluetooth/hci_sock.c  22
-rw-r--r--  net/bluetooth/l2cap.c  182
-rw-r--r--  net/bluetooth/rfcomm/core.c  22
-rw-r--r--  net/bluetooth/rfcomm/sock.c  128
-rw-r--r--  net/bluetooth/rfcomm/tty.c  4
-rw-r--r--  net/bluetooth/sco.c  118
-rw-r--r--  net/core/datagram.c  43
-rw-r--r--  net/core/filter.c  8
-rw-r--r--  net/core/netfilter.c  4
-rw-r--r--  net/core/rtnetlink.c  7
-rw-r--r--  net/core/sock.c  291
-rw-r--r--  net/decnet/af_decnet.c  207
-rw-r--r--  net/decnet/dn_nsp_in.c  57
-rw-r--r--  net/decnet/dn_nsp_out.c  37
-rw-r--r--  net/decnet/dn_timer.c  18
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c  6
-rw-r--r--  net/econet/af_econet.c  55
-rw-r--r--  net/ipv4/af_inet.c  143
-rw-r--r--  net/ipv4/icmp.c  16
-rw-r--r--  net/ipv4/ip_input.c  6
-rw-r--r--  net/ipv4/ip_output.c  57
-rw-r--r--  net/ipv4/ip_sockglue.c  47
-rw-r--r--  net/ipv4/ipmr.c  3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c  2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c  6
-rw-r--r--  net/ipv4/netfilter/ipchains_core.c  2
-rw-r--r--  net/ipv4/netfilter/ipfwadm_core.c  2
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c  4
-rw-r--r--  net/ipv4/netfilter/ipt_owner.c  14
-rw-r--r--  net/ipv4/raw.c  69
-rw-r--r--  net/ipv4/syncookies.c  2
-rw-r--r--  net/ipv4/tcp.c  297
-rw-r--r--  net/ipv4/tcp_diag.c  59
-rw-r--r--  net/ipv4/tcp_input.c  141
-rw-r--r--  net/ipv4/tcp_ipv4.c  286
-rw-r--r--  net/ipv4/tcp_minisocks.c  261
-rw-r--r--  net/ipv4/tcp_output.c  52
-rw-r--r--  net/ipv4/tcp_timer.c  50
-rw-r--r--  net/ipv4/udp.c  120
-rw-r--r--  net/ipv6/af_inet6.c  54
-rw-r--r--  net/ipv6/datagram.c  16
-rw-r--r--  net/ipv6/icmp.c  18
-rw-r--r--  net/ipv6/ip6_output.c  28
-rw-r--r--  net/ipv6/ipv6_sockglue.c  61
-rw-r--r--  net/ipv6/mcast.c  4
-rw-r--r--  net/ipv6/ndisc.c  4
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c  4
-rw-r--r--  net/ipv6/raw.c  88
-rw-r--r--  net/ipv6/tcp_ipv6.c  244
-rw-r--r--  net/ipv6/udp.c  111
-rw-r--r--  net/ipx/af_ipx.c  72
-rw-r--r--  net/ipx/ipx_proc.c  13
-rw-r--r--  net/ipx/ipx_route.c  2
-rw-r--r--  net/irda/af_irda.c  179
-rw-r--r--  net/key/af_key.c  38
-rw-r--r--  net/llc/af_llc.c  118
-rw-r--r--  net/llc/llc_c_ac.c  2
-rw-r--r--  net/llc/llc_conn.c  60
-rw-r--r--  net/llc/llc_if.c  6
-rw-r--r--  net/llc/llc_mac.c  2
-rw-r--r--  net/llc/llc_main.c  16
-rw-r--r--  net/llc/llc_proc.c  16
-rw-r--r--  net/llc/llc_sap.c  22
-rw-r--r--  net/netlink/af_netlink.c  101
-rw-r--r--  net/netrom/af_netrom.c  168
-rw-r--r--  net/netrom/nr_in.c  4
-rw-r--r--  net/netrom/nr_out.c  13
-rw-r--r--  net/netrom/nr_subr.c  12
-rw-r--r--  net/netrom/nr_timer.c  24
-rw-r--r--  net/packet/af_packet.c  116
-rw-r--r--  net/rose/af_rose.c  175
-rw-r--r--  net/rose/rose_in.c  7
-rw-r--r--  net/rose/rose_out.c  9
-rw-r--r--  net/rose/rose_route.c  8
-rw-r--r--  net/rose/rose_subr.c  12
-rw-r--r--  net/rose/rose_timer.c  24
-rw-r--r--  net/sched/sch_atm.c  3
-rw-r--r--  net/sctp/associola.c  12
-rw-r--r--  net/sctp/endpointola.c  14
-rw-r--r--  net/sctp/input.c  6
-rw-r--r--  net/sctp/ipv6.c  46
-rw-r--r--  net/sctp/output.c  4
-rw-r--r--  net/sctp/outqueue.c  2
-rw-r--r--  net/sctp/protocol.c  30
-rw-r--r--  net/sctp/sm_make_chunk.c  2
-rw-r--r--  net/sctp/sm_sideeffect.c  14
-rw-r--r--  net/sctp/sm_statefuns.c  3
-rw-r--r--  net/sctp/socket.c  149
-rw-r--r--  net/sctp/transport.c  2
-rw-r--r--  net/sctp/ulpqueue.c  18
-rw-r--r--  net/socket.c  16
-rw-r--r--  net/sunrpc/svcsock.c  80
-rw-r--r--  net/sunrpc/xprt.c  91
-rw-r--r--  net/unix/af_unix.c  287
-rw-r--r--  net/unix/garbage.c  27
-rw-r--r--  net/wanrouter/af_wanpipe.c  436
-rw-r--r--  net/x25/af_x25.c  170
-rw-r--r--  net/x25/x25_in.c  11
-rw-r--r--  net/x25/x25_out.c  13
-rw-r--r--  net/x25/x25_proc.c  9
-rw-r--r--  net/x25/x25_subr.c  14
-rw-r--r--  net/x25/x25_timer.c  14
-rw-r--r--  net/xfrm/xfrm_policy.c  28
-rw-r--r--  net/xfrm/xfrm_state.c  3
-rw-r--r--  net/xfrm/xfrm_user.c  9
169 files changed, 3919 insertions, 3747 deletions
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 81e548c9c706..2bacc8f75e1c 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2656,7 +2656,7 @@ he_close(struct atm_vcc *vcc)
* TBRQ, the host issues the close command to the adapter.
*/
- while (((tx_inuse = atomic_read(&vcc->sk->wmem_alloc)) > 0) &&
+ while (((tx_inuse = atomic_read(&vcc->sk->sk_wmem_alloc)) > 0) &&
(retry < MAX_RETRY)) {
set_current_state(TASK_UNINTERRUPTIBLE);
(void) schedule_timeout(sleep);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index df77e05eb528..d165489bdec7 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -728,7 +728,8 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
struct atm_vcc *vcc = vc->tx_vcc;
vc->estimator->cells += (skb->len + 47) / 48;
- if (atomic_read(&vcc->sk->wmem_alloc) > (vcc->sk->sndbuf >> 1)) {
+ if (atomic_read(&vcc->sk->sk_wmem_alloc) >
+ (vcc->sk->sk_sndbuf >> 1)) {
u32 cps = vc->estimator->maxcps;
vc->estimator->cps = cps;
@@ -2023,7 +2024,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
atomic_inc(&vcc->stats->tx_err);
return -ENOMEM;
}
- atomic_add(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
memcpy(skb_put(skb, 52), cell, 52);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 680ccf7f4a53..58a00bf51d67 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1782,14 +1782,14 @@ static int open_tx(struct atm_vcc *vcc)
if (ia_vcc->pcr < iadev->rate_limit) {
if (vcc->qos.txtp.max_sdu != 0) {
if (ia_vcc->pcr > 60000)
- vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 5;
+ vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
else if (ia_vcc->pcr > 2000)
- vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 4;
+ vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
else
- vcc->sk->sndbuf = 3*vcc->qos.txtp.max_sdu;
+ vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
}
else
- vcc->sk->sndbuf = 24576;
+ vcc->sk->sk_sndbuf = 24576;
}
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 489da3c1953c..f0945464e76e 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -277,11 +277,12 @@ static void pppoe_flush_dev(struct net_device *dev)
lock_sock(sk);
- if (sk->state & (PPPOX_CONNECTED|PPPOX_BOUND)){
+ if (sk->sk_state &
+ (PPPOX_CONNECTED | PPPOX_BOUND)) {
pppox_unbind_sock(sk);
dev_put(dev);
- sk->state = PPPOX_ZOMBIE;
- sk->state_change(sk);
+ sk->sk_state = PPPOX_ZOMBIE;
+ sk->sk_state_change(sk);
}
release_sock(sk);
@@ -347,16 +348,16 @@ int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
struct pppox_opt *po = pppox_sk(sk);
struct pppox_opt *relay_po = NULL;
- if (sk->state & PPPOX_BOUND) {
+ if (sk->sk_state & PPPOX_BOUND) {
skb_pull(skb, sizeof(struct pppoe_hdr));
ppp_input(&po->chan, skb);
- } else if (sk->state & PPPOX_RELAY) {
+ } else if (sk->sk_state & PPPOX_RELAY) {
relay_po = get_item_by_addr(&po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
- if ((relay_po->sk->state & PPPOX_CONNECTED) == 0)
+ if ((relay_po->sk->sk_state & PPPOX_CONNECTED) == 0)
goto abort_put;
skb_pull(skb, sizeof(struct pppoe_hdr));
@@ -447,7 +448,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
/* We're no longer connected at the PPPoE layer,
* and must wait for ppp channel to disconnect us.
*/
- sk->state = PPPOX_ZOMBIE;
+ sk->sk_state = PPPOX_ZOMBIE;
}
bh_unlock_sock(sk);
@@ -503,12 +504,12 @@ static int pppoe_create(struct socket *sock)
sock->state = SS_UNCONNECTED;
sock->ops = &pppoe_ops;
- sk->backlog_rcv = pppoe_rcv_core;
- sk->state = PPPOX_NONE;
- sk->type = SOCK_STREAM;
- sk->family = PF_PPPOX;
- sk->protocol = PX_PROTO_OE;
- sk->destruct = pppoe_sk_free;
+ sk->sk_backlog_rcv = pppoe_rcv_core;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_protocol = PX_PROTO_OE;
+ sk->sk_destruct = pppoe_sk_free;
po = pppox_sk(sk) = kmalloc(sizeof(*po), GFP_KERNEL);
if (!po)
@@ -536,7 +537,7 @@ int pppoe_release(struct socket *sock)
pppox_unbind_sock(sk);
/* Signal the death of the socket. */
- sk->state = PPPOX_DEAD;
+ sk->sk_state = PPPOX_DEAD;
po = pppox_sk(sk);
if (po->pppoe_pa.sid) {
@@ -551,7 +552,7 @@ int pppoe_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
sock_put(sk);
return error;
@@ -575,12 +576,12 @@ int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
/* Check for already bound sockets */
error = -EBUSY;
- if ((sk->state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid)
+ if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid)
goto end;
/* Check for already disconnected sockets, on attempts to disconnect */
error = -EALREADY;
- if((sk->state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid )
+ if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid )
goto end;
error = 0;
@@ -596,7 +597,7 @@ int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
memset(po, 0, sizeof(struct pppox_opt));
po->sk = sk;
- sk->state = PPPOX_NONE;
+ sk->sk_state = PPPOX_NONE;
}
/* Don't re-bind if sid==0 */
@@ -630,7 +631,7 @@ int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
if (error)
goto err_put;
- sk->state = PPPOX_CONNECTED;
+ sk->sk_state = PPPOX_CONNECTED;
}
po->num = sp->sa_addr.pppoe.sid;
@@ -678,7 +679,7 @@ int pppoe_ioctl(struct socket *sock, unsigned int cmd,
case PPPIOCGMRU:
err = -ENXIO;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
@@ -692,7 +693,7 @@ int pppoe_ioctl(struct socket *sock, unsigned int cmd,
case PPPIOCSMRU:
err = -ENXIO;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
@@ -719,11 +720,11 @@ int pppoe_ioctl(struct socket *sock, unsigned int cmd,
struct pppox_opt *relay_po;
err = -EBUSY;
- if (sk->state & (PPPOX_BOUND|PPPOX_ZOMBIE|PPPOX_DEAD))
+ if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD))
break;
err = -ENOTCONN;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
/* PPPoE address from the user specifies an outbound
@@ -747,17 +748,17 @@ int pppoe_ioctl(struct socket *sock, unsigned int cmd,
break;
sock_put(relay_po->sk);
- sk->state |= PPPOX_RELAY;
+ sk->sk_state |= PPPOX_RELAY;
err = 0;
break;
}
case PPPOEIOCDFWD:
err = -EALREADY;
- if (!(sk->state & PPPOX_RELAY))
+ if (!(sk->sk_state & PPPOX_RELAY))
break;
- sk->state &= ~PPPOX_RELAY;
+ sk->sk_state &= ~PPPOX_RELAY;
err = 0;
break;
@@ -780,7 +781,7 @@ int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
struct net_device *dev;
char *start;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->state & PPPOX_CONNECTED)) {
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
error = -ENOTCONN;
goto end;
}
@@ -812,7 +813,7 @@ int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->protocol = __constant_htons(ETH_P_PPP_SES);
ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr));
@@ -856,7 +857,7 @@ int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
int data_len = skb->len;
struct sk_buff *skb2;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->state & PPPOX_CONNECTED))
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
hdr.ver = 1;
@@ -938,7 +939,7 @@ int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
int len;
struct pppoe_hdr *ph = NULL;
- if (sk->state & PPPOX_BOUND) {
+ if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index a5633d50c35a..9a29158473d8 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -58,9 +58,9 @@ void pppox_unbind_sock(struct sock *sk)
{
/* Clear connection to ppp device, if attached. */
- if (sk->state & (PPPOX_BOUND|PPPOX_ZOMBIE)) {
+ if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) {
ppp_unregister_channel(&pppox_sk(sk)->chan);
- sk->state = PPPOX_DEAD;
+ sk->sk_state = PPPOX_DEAD;
}
}
@@ -81,7 +81,7 @@ static int pppox_ioctl(struct socket* sock, unsigned int cmd,
case PPPIOCGCHAN: {
int index;
rc = -ENOTCONN;
- if (!(sk->state & PPPOX_CONNECTED))
+ if (!(sk->sk_state & PPPOX_CONNECTED))
break;
rc = -EINVAL;
@@ -90,12 +90,13 @@ static int pppox_ioctl(struct socket* sock, unsigned int cmd,
break;
rc = 0;
- sk->state |= PPPOX_BOUND;
+ sk->sk_state |= PPPOX_BOUND;
break;
}
default:
- if (pppox_protos[sk->protocol]->ioctl)
- rc = pppox_protos[sk->protocol]->ioctl(sock, cmd, arg);
+ if (pppox_protos[sk->sk_protocol]->ioctl)
+ rc = pppox_protos[sk->sk_protocol]->ioctl(sock, cmd,
+ arg);
break;
};
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 0744ac038279..a982e1f14dc0 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -295,9 +295,9 @@ ncp_delete_inode(struct inode *inode)
static void ncp_stop_tasks(struct ncp_server *server) {
struct sock* sk = server->ncp_sock->sk;
- sk->error_report = server->error_report;
- sk->data_ready = server->data_ready;
- sk->write_space = server->write_space;
+ sk->sk_error_report = server->error_report;
+ sk->sk_data_ready = server->data_ready;
+ sk->sk_write_space = server->write_space;
del_timer_sync(&server->timeout_tm);
flush_scheduled_work();
}
@@ -550,12 +550,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
INIT_LIST_HEAD(&server->tx.requests);
init_MUTEX(&server->rcv.creq_sem);
- server->tx.creq = NULL;
- server->rcv.creq = NULL;
- server->data_ready = sock->sk->data_ready;
- server->write_space = sock->sk->write_space;
- server->error_report = sock->sk->error_report;
- sock->sk->user_data = server;
+ server->tx.creq = NULL;
+ server->rcv.creq = NULL;
+ server->data_ready = sock->sk->sk_data_ready;
+ server->write_space = sock->sk->sk_write_space;
+ server->error_report = sock->sk->sk_error_report;
+ sock->sk->sk_user_data = server;
init_timer(&server->timeout_tm);
#undef NCP_PACKET_SIZE
@@ -566,15 +566,15 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
if (server->packet == NULL)
goto out_nls;
- sock->sk->data_ready = ncp_tcp_data_ready;
- sock->sk->error_report = ncp_tcp_error_report;
+ sock->sk->sk_data_ready = ncp_tcp_data_ready;
+ sock->sk->sk_error_report = ncp_tcp_error_report;
if (sock->type == SOCK_STREAM) {
server->rcv.ptr = (unsigned char*)&server->rcv.buf;
server->rcv.len = 10;
server->rcv.state = 0;
INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
- sock->sk->write_space = ncp_tcp_write_space;
+ sock->sk->sk_write_space = ncp_tcp_write_space;
} else {
INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index a29294f3987c..128d3188cd43 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -83,21 +83,21 @@ struct ncp_request_reply {
};
void ncp_tcp_data_ready(struct sock *sk, int len) {
- struct ncp_server *server = sk->user_data;
+ struct ncp_server *server = sk->sk_user_data;
server->data_ready(sk, len);
schedule_work(&server->rcv.tq);
}
void ncp_tcp_error_report(struct sock *sk) {
- struct ncp_server *server = sk->user_data;
+ struct ncp_server *server = sk->sk_user_data;
server->error_report(sk);
schedule_work(&server->rcv.tq);
}
void ncp_tcp_write_space(struct sock *sk) {
- struct ncp_server *server = sk->user_data;
+ struct ncp_server *server = sk->sk_user_data;
/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
not vice versa... */
@@ -427,7 +427,7 @@ static void __ncpdgram_rcv_proc(void *s) {
unsigned int hdrl;
result -= 8;
- hdrl = sock->sk->family == AF_INET ? 8 : 6;
+ hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
if (sign_verify_reply(server, ((char*)req->reply_buf) + hdrl, result - hdrl, cpu_to_le32(result), ((char*)req->reply_buf) + result)) {
printk(KERN_INFO "ncpfs: Signature violation\n");
result = -EIO;
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index f75a25719f3c..9873354f2fd3 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -196,7 +196,7 @@ extern void aarp_device_down(struct net_device *dev);
extern void aarp_cleanup_module(void);
#endif /* MODULE */
-#define at_sk(__sk) ((struct atalk_sock *)(__sk)->protinfo)
+#define at_sk(__sk) ((struct atalk_sock *)(__sk)->sk_protinfo)
extern struct sock *atalk_sockets;
extern rwlock_t atalk_sockets_lock;
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 33a81bdd1fb8..16b70b711ffa 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -30,7 +30,7 @@
#define ATM_DS3_PCR (8000*12)
/* DS3: 12 cells in a 125 usec time slot */
-#define atm_sk(__sk) ((struct atm_vcc *)(__sk)->protinfo)
+#define atm_sk(__sk) ((struct atm_vcc *)(__sk)->sk_protinfo)
#define ATM_SD(s) (atm_sk((s)->sk))
@@ -413,19 +413,20 @@ static inline int atm_guess_pdu2truesize(int pdu_size)
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
- atomic_add(truesize, &vcc->sk->rmem_alloc);
+ atomic_add(truesize, &vcc->sk->sk_rmem_alloc);
}
static inline void atm_return(struct atm_vcc *vcc,int truesize)
{
- atomic_sub(truesize, &vcc->sk->rmem_alloc);
+ atomic_sub(truesize, &vcc->sk->sk_rmem_alloc);
}
static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
{
- return (size + atomic_read(&vcc->sk->wmem_alloc)) < vcc->sk->sndbuf;
+ return (size + atomic_read(&vcc->sk->sk_wmem_alloc)) <
+ vcc->sk->sk_sndbuf;
}
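
These helpers encode the ATM socket's buffer accounting: receive-side
charges go through atm_force_charge()/atm_return() against sk_rmem_alloc,
and a PDU may be transmitted only while charged send memory stays below
sk_sndbuf. A minimal sketch of the transmit-side pattern, mirroring the
idt77252 hunk above (the function name vcc_xmit_sketch is hypothetical):

static int vcc_xmit_sketch(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* Refuse to queue while the send budget is exhausted. */
	if (!atm_may_send(vcc, skb->truesize))
		return -ENOBUFS;
	/* Charge the buffer against sk_wmem_alloc; the accounting is
	 * undone when the skb is freed after transmission. */
	atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
	/* ...hand the skb to the adapter here... */
	return 0;
}
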
diff --git a/include/linux/if_ec.h b/include/linux/if_ec.h
index b5ee8425325f..d654666f1067 100644
--- a/include/linux/if_ec.h
+++ b/include/linux/if_ec.h
@@ -56,7 +56,7 @@ struct econet_opt
unsigned short num;
};
-#define ec_sk(__sk) ((struct econet_opt *)(__sk)->protinfo)
+#define ec_sk(__sk) ((struct econet_opt *)(__sk)->sk_protinfo)
struct ec_device
{
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 78ffd78ddb9e..581531924426 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -132,7 +132,7 @@ struct pppox_opt {
#define pppoe_pa proto.pppoe.pa
#define pppoe_relay proto.pppoe.relay
-#define pppox_sk(__sk) ((struct pppox_opt *)(__sk)->protinfo)
+#define pppox_sk(__sk) ((struct pppox_opt *)(__sk)->sk_protinfo)
struct module;
diff --git a/include/linux/if_wanpipe.h b/include/linux/if_wanpipe.h
index 166e0e62c8e2..e594ca6069e5 100644
--- a/include/linux/if_wanpipe.h
+++ b/include/linux/if_wanpipe.h
@@ -34,7 +34,7 @@ struct wan_sockaddr_ll
typedef struct
{
unsigned char free;
- unsigned char sk_state;
+ unsigned char state_sk;
int rcvbuf;
int sndbuf;
int rmem;
@@ -117,7 +117,7 @@ struct wanpipe_opt
unsigned short num;
};
-#define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->protinfo)
+#define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->sk_protinfo)
#endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 50fdde2d8a9f..0bc21b4e19eb 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -229,7 +229,7 @@ struct tcp6_sock {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
-#define ipv6_only_sock(sk) ((sk)->family == PF_INET6 && __ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
#else
#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f231a7ca0fa9..19b88f2c7c43 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -603,8 +603,8 @@ extern struct semaphore rtnl_sem;
#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
#define rtnl_shunlock() do { up(&rtnl_sem); \
- if (rtnl && rtnl->receive_queue.qlen) \
- rtnl->data_ready(rtnl, 0); \
+ if (rtnl && rtnl->sk_receive_queue.qlen) \
+ rtnl->sk_data_ready(rtnl, 0); \
} while(0)
extern void rtnl_lock(void);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index db82099b2b17..9bec0adddd5a 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -24,8 +24,8 @@ static inline unix_socket *first_unix_socket(int *i)
static inline unix_socket *next_unix_socket(int *i, unix_socket *s)
{
/* More in this chain? */
- if (s->next)
- return s->next;
+ if (s->sk_next)
+ return s->sk_next;
/* Look for next non-empty chain. */
for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
if (unix_socket_table[*i])
diff --git a/include/net/ax25.h b/include/net/ax25.h
index a3344d406e24..661e1ea63c02 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -201,7 +201,7 @@ typedef struct ax25_cb {
struct sock *sk; /* Backlink to socket */
} ax25_cb;
-#define ax25_sk(__sk) ((ax25_cb *)(__sk)->protinfo)
+#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
/* af_ax25.c */
extern ax25_cb *ax25_list;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 5bf98447775c..fd010a9dc75e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -485,7 +485,7 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
/* HCI info for socket */
-#define hci_pi(sk) ((struct hci_pinfo *) sk->protinfo)
+#define hci_pi(sk) ((struct hci_pinfo *)sk->sk_protinfo)
struct hci_pinfo {
struct hci_dev *hdev;
struct hci_filter filter;
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 3758bc57d7b8..d4e45740b109 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -206,7 +206,7 @@ struct l2cap_conn {
};
/* ----- L2CAP channel and socket info ----- */
-#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk->protinfo)
+#define l2cap_pi(sk) ((struct l2cap_pinfo *)sk->sk_protinfo)
struct l2cap_pinfo {
__u16 psm;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index c2a2098b4369..7d410fed2520 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -302,7 +302,7 @@ struct sockaddr_rc {
u8 rc_channel;
};
-#define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk->protinfo)
+#define rfcomm_pi(sk) ((struct rfcomm_pinfo *)sk->sk_protinfo)
struct rfcomm_pinfo {
struct rfcomm_dlc *dlc;
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 968f56602da6..7f83037a0880 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -71,7 +71,7 @@ struct sco_conn {
#define sco_conn_unlock(c) spin_unlock(&c->lock);
/* ----- SCO socket info ----- */
-#define sco_pi(sk) ((struct sco_pinfo *) sk->protinfo)
+#define sco_pi(sk) ((struct sco_pinfo *)sk->sk_protinfo)
struct sco_pinfo {
__u32 flags;
diff --git a/include/net/dn.h b/include/net/dn.h
index c364cf8cecd7..da2239862a1f 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -133,7 +133,7 @@ struct dn_scp /* Session Control Port */
};
-#define DN_SK(__sk) ((struct dn_scp *)(__sk)->protinfo)
+#define DN_SK(__sk) ((struct dn_scp *)(__sk)->sk_protinfo)
/*
* src,dst : Source and Destination DECnet addresses
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 3826c0579000..a564fb8161a8 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -201,7 +201,7 @@ static __inline__ int sendack(unsigned short seq)
*/
static __inline__ int dn_congested(struct sock *sk)
{
- return atomic_read(&sk->rmem_alloc) > (sk->rcvbuf >> 1);
+ return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
}
#define DN_MAX_NSP_DATA_HEADER (11)
diff --git a/include/net/ip.h b/include/net/ip.h
index f6a4e017f7bf..aa8dd8855dfa 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -238,7 +238,7 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
{
inet_sk(sk)->rcv_saddr = inet_sk(sk)->saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- if (sk->family == PF_INET6) {
+ if (sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
memset(&np->saddr, 0, sizeof(np->saddr));
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d9f7ad0b9f71..752b31601ee4 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -107,11 +107,11 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
struct ipv6_pinfo *np = inet6_sk(sk);
struct rt6_info *rt = (struct rt6_info *) dst;
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
np->daddr_cache = daddr;
np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
}
#endif
diff --git a/include/net/ipx.h b/include/net/ipx.h
index dd20531f04dd..6da0ea4d7cca 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -105,7 +105,7 @@ struct ipx_opt {
unsigned short ipx_ncp_conn;
};
-#define ipx_sk(__sk) ((struct ipx_opt *)(__sk)->protinfo)
+#define ipx_sk(__sk) ((struct ipx_opt *)(__sk)->sk_protinfo)
#define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0]))
#endif
#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
diff --git a/include/net/irda/af_irda.h b/include/net/irda/af_irda.h
index 6b278e1a5c69..0f6dafad4907 100644
--- a/include/net/irda/af_irda.h
+++ b/include/net/irda/af_irda.h
@@ -77,6 +77,6 @@ struct irda_sock {
LOCAL_FLOW rx_flow;
};
-#define irda_sk(__sk) ((struct irda_sock *)(__sk)->protinfo)
+#define irda_sk(__sk) ((struct irda_sock *)(__sk)->sk_protinfo)
#endif /* AF_IRDA_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 20fbfeec34c4..3deeb4235605 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -275,7 +275,7 @@ extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
{
- return atomic_read(&sk->rmem_alloc) + skb->truesize <
- (unsigned)sk->rcvbuf;
+ return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
+ (unsigned)sk->sk_rcvbuf;
}
#endif /* LLC_C_EV_H */
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index acf62b4e3321..3cefd5ce2642 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -67,7 +67,7 @@ struct llc_opt {
Used for resending FRMR */
};
-#define llc_sk(__sk) ((struct llc_opt *)(__sk)->protinfo)
+#define llc_sk(__sk) ((struct llc_opt *)(__sk)->sk_protinfo)
extern struct sock *llc_sk_alloc(int family, int priority);
extern void llc_sk_free(struct sock *sk);
diff --git a/include/net/netrom.h b/include/net/netrom.h
index b83879018c6f..09a0c75d40b3 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -74,7 +74,7 @@ typedef struct {
struct sock *sk; /* Backlink to socket */
} nr_cb;
-#define nr_sk(__sk) ((nr_cb *)(__sk)->protinfo)
+#define nr_sk(__sk) ((nr_cb *)(__sk)->sk_protinfo)
struct nr_neigh {
struct nr_neigh *next;
diff --git a/include/net/rose.h b/include/net/rose.h
index fffbd5d5ee82..96d561e15a26 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -138,7 +138,7 @@ typedef struct {
struct sock *sk; /* Backlink to socket */
} rose_cb;
-#define rose_sk(__sk) ((rose_cb *)(__sk)->protinfo)
+#define rose_sk(__sk) ((rose_cb *)(__sk)->sk_protinfo)
/* af_rose.c */
extern ax25_address rose_callsign;
diff --git a/include/net/route.h b/include/net/route.h
index acdfaaf6afb9..7fff45937e71 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -44,7 +44,7 @@
/* RTO_CONN is not used (being alias for 0), but preserved not to break
* some modules referring to it. */
-#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sk->localroute)
+#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sk->sk_localroute)
struct inet_peer;
struct rtable
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 501e41da70a3..ed453f38f12d 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -597,7 +597,7 @@ int static inline __sctp_state(const struct sctp_association *asoc,
#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
int static inline __sctp_sstate(const struct sock *sk, sctp_sock_state_t state)
{
- return sk->state == state;
+ return sk->sk_state == state;
}
#endif /* __net_sctp_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index f9fb0af964c2..373278101ec1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -59,10 +59,11 @@
* the other protocols.
*/
-/* Define this to get the sk->debug debugging facility. */
+/* Define this to get the sk->sk_debug debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
-#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
+#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->sk_debug)) \
+ printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
@@ -79,123 +80,147 @@ typedef struct {
} socket_lock_t;
#define sock_lock_init(__sk) \
-do { spin_lock_init(&((__sk)->lock.slock)); \
- (__sk)->lock.owner = NULL; \
- init_waitqueue_head(&((__sk)->lock.wq)); \
+do { spin_lock_init(&((__sk)->sk_lock.slock)); \
+ (__sk)->sk_lock.owner = NULL; \
+ init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)
+struct sock;
+
+/**
+ * struct sock_common - minimal network layer representation of sockets
+ * @skc_family - network address family
+ * @skc_state - Connection state
+ * @skc_reuse - %SO_REUSEADDR setting
+ * @skc_bound_dev_if - bound device index if != 0
+ * @skc_next - main hash linkage for various protocol lookup tables
+ * @skc_pprev - main hash linkage for various protocol lookup tables
+ * @skc_bind_next - main hash linkage for various protocol lookup tables
+ * @skc_bind_pprev - main hash linkage for various protocol lookup tables
+ * @skc_refcnt - reference count
+ *
+ * This is the minimal network layer representation of sockets, the header
+ * for struct sock and struct tcp_tw_bucket.
+ */
+struct sock_common {
+ unsigned short skc_family;
+ volatile unsigned char skc_state;
+ unsigned char skc_reuse;
+ int skc_bound_dev_if;
+ struct sock *skc_next;
+ struct sock **skc_pprev;
+ struct sock *skc_bind_next;
+ struct sock **skc_bind_pprev;
+ atomic_t skc_refcnt;
+};
+
/**
* struct sock - network layer representation of sockets
- * @state - Connection state
- * @zapped - ax25 & ipx means !linked
- * @reuse - %SO_REUSEADDR setting
- * @shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
- * @bound_dev_if - bound device index if != 0
- * @next - main hash linkage for various protocol lookup tables
- * @pprev - main hash linkage for various protocol lookup tables
- * @bind_next - main hash linkage for various protocol lookup tables
- * @bind_pprev - main hash linkage for various protocol lookup tables
- * @refcnt - reference count
- * @family - network address family
- * @use_write_queue - whether to call sk->write_space(sk) in sock_wfree
- * @userlocks - %SO_SNDBUF and %SO_RCVBUF settings
- * @lock - synchronizer
- * @rcvbuf - size of receive buffer in bytes
- * @sleep - sock wait queue
- * @dst_cache - destination cache
- * @dst_lock - destination cache lock
- * @policy - flow policy
- * @rmem_alloc - receive queue bytes committed
- * @receive_queue - incoming packets
- * @wmem_alloc - transmit queue bytes committed
- * @write_queue - Packet sending queue
- * @omem_alloc - "o" is "option" or "other"
- * @wmem_queued - persistent queue size
- * @forward_alloc - space allocated forward
- * @allocation - allocation mode
- * @sndbuf - size of send buffer in bytes
- * @prev - pointer to previous sock in the list this sock is in
- * @flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
- * @no_check - %SO_NO_CHECK setting, whether or not to checksum packets
- * @debug - %SO_DEBUG setting
- * @rcvtstamp - %SO_TIMESTAMP setting
- * @no_largesend - whether to send large segments or not
- * @route_caps - route capabilities (e.g. %NETIF_F_TSO)
- * @lingertime - %SO_LINGER l_linger setting
- * @hashent - hash entry in several tables (e.g. tcp_ehash)
- * @pair - socket pair (e.g. AF_UNIX/unix_peer)
- * @backlog - always used with the per-socket spinlock held
- * @callback_lock - used with the callbacks in the end of this struct
- * @error_queue - rarely used
- * @prot - protocol handlers inside a network family
- * @err - last error
- * @err_soft - errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
- * @ack_backlog - current listen backlog
- * @max_ack_backlog - listen backlog set in listen()
- * @priority - %SO_PRIORITY setting
- * @type - socket type (%SOCK_STREAM, etc)
- * @localroute - route locally only, %SO_DONTROUTE setting
- * @protocol - which protocol this socket belongs in this network family
- * @peercred - %SO_PEERCRED setting
- * @rcvlowat - %SO_RCVLOWAT setting
- * @rcvtimeo - %SO_RCVTIMEO setting
- * @sndtimeo - %SO_SNDTIMEO setting
- * @filter - socket filtering instructions
- * @protinfo - private area, net family specific, when not using slab
- * @slab - the slabcache this instance was allocated from
- * @timer - sock cleanup timer
- * @stamp - time stamp of last packet received
- * @socket - Identd and reporting IO signals
- * @user_data - RPC layer private data
- * @owner - module that owns this socket
- * @state_change - callback to indicate change in the state of the sock
- * @data_ready - callback to indicate there is data to be processed
- * @write_space - callback to indicate there is buffer sending space available
- * @error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
- * @backlog_rcv - callback to process the backlog
- * @destruct - called at sock freeing time, i.e. when all refcnt == 0
+ * @__sk_common - shared layout with tcp_tw_bucket
+ * @sk_zapped - ax25 & ipx means !linked
+ * @sk_shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
+ * @sk_use_write_queue - whether to call sk->sk_write_space in sock_wfree
+ * @sk_userlocks - %SO_SNDBUF and %SO_RCVBUF settings
+ * @sk_lock - synchronizer
+ * @sk_rcvbuf - size of receive buffer in bytes
+ * @sk_sleep - sock wait queue
+ * @sk_dst_cache - destination cache
+ * @sk_dst_lock - destination cache lock
+ * @sk_policy - flow policy
+ * @sk_rmem_alloc - receive queue bytes committed
+ * @sk_receive_queue - incoming packets
+ * @sk_wmem_alloc - transmit queue bytes committed
+ * @sk_write_queue - Packet sending queue
+ * @sk_omem_alloc - "o" is "option" or "other"
+ * @sk_wmem_queued - persistent queue size
+ * @sk_forward_alloc - space allocated forward
+ * @sk_allocation - allocation mode
+ * @sk_sndbuf - size of send buffer in bytes
+ * @sk_prev - pointer to previous sock in the list this sock is in
+ * @sk_flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
+ * @sk_no_check - %SO_NO_CHECK setting, whether or not to checksum packets
+ * @sk_debug - %SO_DEBUG setting
+ * @sk_rcvtstamp - %SO_TIMESTAMP setting
+ * @sk_no_largesend - whether to send large segments or not
+ * @sk_route_caps - route capabilities (e.g. %NETIF_F_TSO)
+ * @sk_lingertime - %SO_LINGER l_linger setting
+ * @sk_hashent - hash entry in several tables (e.g. tcp_ehash)
+ * @sk_pair - socket pair (e.g. AF_UNIX/unix_peer)
+ * @sk_backlog - always used with the per-socket spinlock held
+ * @sk_callback_lock - used with the callbacks in the end of this struct
+ * @sk_error_queue - rarely used
+ * @sk_prot - protocol handlers inside a network family
+ * @sk_err - last error
+ * @sk_err_soft - errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
+ * @sk_ack_backlog - current listen backlog
+ * @sk_max_ack_backlog - listen backlog set in listen()
+ * @sk_priority - %SO_PRIORITY setting
+ * @sk_type - socket type (%SOCK_STREAM, etc)
+ * @sk_localroute - route locally only, %SO_DONTROUTE setting
+ * @sk_protocol - which protocol this socket belongs in this network family
+ * @sk_peercred - %SO_PEERCRED setting
+ * @sk_rcvlowat - %SO_RCVLOWAT setting
+ * @sk_rcvtimeo - %SO_RCVTIMEO setting
+ * @sk_sndtimeo - %SO_SNDTIMEO setting
+ * @sk_filter - socket filtering instructions
+ * @sk_protinfo - private area, net family specific, when not using slab
+ * @sk_slab - the slabcache this instance was allocated from
+ * @sk_timer - sock cleanup timer
+ * @sk_stamp - time stamp of last packet received
+ * @sk_socket - Identd and reporting IO signals
+ * @sk_user_data - RPC layer private data
+ * @sk_owner - module that owns this socket
+ * @sk_state_change - callback to indicate change in the state of the sock
+ * @sk_data_ready - callback to indicate there is data to be processed
+ * @sk_write_space - callback to indicate there is buffer sending space available
+ * @sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
+ * @sk_backlog_rcv - callback to process the backlog
+ * @sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
*/
struct sock {
- /* Begin of struct sock/struct tcp_tw_bucket shared layout */
- unsigned short family;
- volatile unsigned char state;
- unsigned char reuse;
- int bound_dev_if;
- struct sock *next;
- struct sock **pprev;
- struct sock *bind_next;
- struct sock **bind_pprev;
- atomic_t refcnt;
- /* End of struct sock/struct tcp_tw_bucket shared layout */
- volatile unsigned char zapped;
- unsigned char shutdown;
- unsigned char use_write_queue;
- unsigned char userlocks;
- socket_lock_t lock;
- int rcvbuf;
- wait_queue_head_t *sleep;
- struct dst_entry *dst_cache;
- rwlock_t dst_lock;
- struct xfrm_policy *policy[2];
- atomic_t rmem_alloc;
- struct sk_buff_head receive_queue;
- atomic_t wmem_alloc;
- struct sk_buff_head write_queue;
- atomic_t omem_alloc;
- int wmem_queued;
- int forward_alloc;
- unsigned int allocation;
- int sndbuf;
- struct sock *prev;
- unsigned long flags;
- char no_check;
- unsigned char debug;
- unsigned char rcvtstamp;
- unsigned char no_largesend;
- int route_caps;
- unsigned long lingertime;
- int hashent;
- struct sock *pair;
+ /*
+ * Now struct tcp_tw_bucket also uses sock_common, so please
+ * don't add anything before this first member (__sk_common) --acme
+ */
+ struct sock_common __sk_common;
+#define sk_family __sk_common.skc_family
+#define sk_state __sk_common.skc_state
+#define sk_reuse __sk_common.skc_reuse
+#define sk_bound_dev_if __sk_common.skc_bound_dev_if
+#define sk_next __sk_common.skc_next
+#define sk_pprev __sk_common.skc_pprev
+#define sk_bind_next __sk_common.skc_bind_next
+#define sk_bind_pprev __sk_common.skc_bind_pprev
+#define sk_refcnt __sk_common.skc_refcnt
+ volatile unsigned char sk_zapped;
+ unsigned char sk_shutdown;
+ unsigned char sk_use_write_queue;
+ unsigned char sk_userlocks;
+ socket_lock_t sk_lock;
+ int sk_rcvbuf;
+ wait_queue_head_t *sk_sleep;
+ struct dst_entry *sk_dst_cache;
+ rwlock_t sk_dst_lock;
+ struct xfrm_policy *sk_policy[2];
+ atomic_t sk_rmem_alloc;
+ struct sk_buff_head sk_receive_queue;
+ atomic_t sk_wmem_alloc;
+ struct sk_buff_head sk_write_queue;
+ atomic_t sk_omem_alloc;
+ int sk_wmem_queued;
+ int sk_forward_alloc;
+ unsigned int sk_allocation;
+ int sk_sndbuf;
+ struct sock *sk_prev;
+ unsigned long sk_flags;
+ char sk_no_check;
+ unsigned char sk_debug;
+ unsigned char sk_rcvtstamp;
+ unsigned char sk_no_largesend;
+ int sk_route_caps;
+ unsigned long sk_lingertime;
+ int sk_hashent;
+ struct sock *sk_pair;
/*
* The backlog queue is special, it is always used with
* the per-socket spinlock held and requires low latency
@@ -204,37 +229,37 @@ struct sock {
struct {
struct sk_buff *head;
struct sk_buff *tail;
- } backlog;
- rwlock_t callback_lock;
- struct sk_buff_head error_queue;
- struct proto *prot;
- int err,
- err_soft;
- unsigned short ack_backlog;
- unsigned short max_ack_backlog;
- __u32 priority;
- unsigned short type;
- unsigned char localroute;
- unsigned char protocol;
- struct ucred peercred;
- int rcvlowat;
- long rcvtimeo;
- long sndtimeo;
- struct sk_filter *filter;
- void *protinfo;
- kmem_cache_t *slab;
- struct timer_list timer;
- struct timeval stamp;
- struct socket *socket;
- void *user_data;
- struct module *owner;
- void (*state_change)(struct sock *sk);
- void (*data_ready)(struct sock *sk, int bytes);
- void (*write_space)(struct sock *sk);
- void (*error_report)(struct sock *sk);
- int (*backlog_rcv) (struct sock *sk,
- struct sk_buff *skb);
- void (*destruct)(struct sock *sk);
+ } sk_backlog;
+ rwlock_t sk_callback_lock;
+ struct sk_buff_head sk_error_queue;
+ struct proto *sk_prot;
+ int sk_err,
+ sk_err_soft;
+ unsigned short sk_ack_backlog;
+ unsigned short sk_max_ack_backlog;
+ __u32 sk_priority;
+ unsigned short sk_type;
+ unsigned char sk_localroute;
+ unsigned char sk_protocol;
+ struct ucred sk_peercred;
+ int sk_rcvlowat;
+ long sk_rcvtimeo;
+ long sk_sndtimeo;
+ struct sk_filter *sk_filter;
+ void *sk_protinfo;
+ kmem_cache_t *sk_slab;
+ struct timer_list sk_timer;
+ struct timeval sk_stamp;
+ struct socket *sk_socket;
+ void *sk_user_data;
+ struct module *sk_owner;
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_write_space)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+ int (*sk_backlog_rcv)(struct sock *sk,
+ struct sk_buff *skb);
+ void (*sk_destruct)(struct sock *sk);
};
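
The #define block at the top of struct sock is what keeps this rename
source-compatible with shared lookup code: sk->sk_family simply expands
to sk->__sk_common.skc_family, so any structure that begins with a
struct sock_common (struct sock here, struct tcp_tw_bucket below) can be
handled by code that only touches the common fields. A tiny hedged
illustration (the helper sk_is_inet is hypothetical, not in the patch):

/* Expands to sk->__sk_common.skc_family via the accessor macro. */
static inline int sk_is_inet(const struct sock *sk)
{
	return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
}
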
/* Sock flags */
@@ -250,29 +275,29 @@ enum sock_flags {
static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
- __set_bit(flag, &sk->flags);
+ __set_bit(flag, &sk->sk_flags);
}
static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
- __clear_bit(flag, &sk->flags);
+ __clear_bit(flag, &sk->sk_flags);
}
static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
- return test_bit(flag, &sk->flags);
+ return test_bit(flag, &sk->sk_flags);
}
/* The per-socket spinlock must be held here. */
-#define sk_add_backlog(__sk, __skb) \
-do { if((__sk)->backlog.tail == NULL) { \
- (__sk)->backlog.head = \
- (__sk)->backlog.tail = (__skb); \
- } else { \
- ((__sk)->backlog.tail)->next = (__skb); \
- (__sk)->backlog.tail = (__skb); \
- } \
- (__skb)->next = NULL; \
+#define sk_add_backlog(__sk, __skb) \
+do { if (!(__sk)->sk_backlog.tail) { \
+ (__sk)->sk_backlog.head = \
+ (__sk)->sk_backlog.tail = (__skb); \
+ } else { \
+ ((__sk)->sk_backlog.tail)->next = (__skb); \
+ (__sk)->sk_backlog.tail = (__skb); \
+ } \
+ (__skb)->next = NULL; \
} while(0)
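
sk_add_backlog() is only ever invoked with the per-socket spinlock held.
The canonical receive-path shape (a simplified sketch of what protocols
like TCP do, not a verbatim hunk from this patch) is:

bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
	/* No process owns the socket: process directly in softirq. */
	ret = sk->sk_backlog_rcv(sk, skb);
} else {
	/* A process holds the lock; queue the skb on the backlog and
	 * let __release_sock() feed it to sk_backlog_rcv() later. */
	sk_add_backlog(sk, skb);
}
bh_unlock_sock(sk);
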
/* IP protocol blocks we attach to sockets.
@@ -337,9 +362,9 @@ static __inline__ void sk_set_owner(struct sock *sk, struct module *owner)
* change the ownership of this struct sock, with one not needed
* transient sk_set_owner call.
*/
- if (unlikely(sk->owner != NULL))
+ if (unlikely(sk->sk_owner != NULL))
BUG();
- sk->owner = owner;
+ sk->sk_owner = owner;
__module_get(owner);
}
@@ -423,28 +448,29 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
*/
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);
-#define sock_owned_by_user(sk) (NULL != (sk)->lock.owner)
+#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
#define lock_sock(__sk) \
do { might_sleep(); \
- spin_lock_bh(&((__sk)->lock.slock)); \
- if ((__sk)->lock.owner != NULL) \
+ spin_lock_bh(&((__sk)->sk_lock.slock)); \
+ if ((__sk)->sk_lock.owner) \
__lock_sock(__sk); \
- (__sk)->lock.owner = (void *)1; \
- spin_unlock_bh(&((__sk)->lock.slock)); \
+ (__sk)->sk_lock.owner = (void *)1; \
+ spin_unlock_bh(&((__sk)->sk_lock.slock)); \
} while(0)
#define release_sock(__sk) \
-do { spin_lock_bh(&((__sk)->lock.slock)); \
- if ((__sk)->backlog.tail != NULL) \
+do { spin_lock_bh(&((__sk)->sk_lock.slock)); \
+ if ((__sk)->sk_backlog.tail) \
__release_sock(__sk); \
- (__sk)->lock.owner = NULL; \
- if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
- spin_unlock_bh(&((__sk)->lock.slock)); \
+ (__sk)->sk_lock.owner = NULL; \
+ if (waitqueue_active(&((__sk)->sk_lock.wq))) \
+ wake_up(&((__sk)->sk_lock.wq)); \
+ spin_unlock_bh(&((__sk)->sk_lock.slock)); \
} while(0)
/* BH context may only use the following locking interface. */
-#define bh_lock_sock(__sk) spin_lock(&((__sk)->lock.slock))
-#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->lock.slock))
+#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
+#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
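
Process context uses the sleeping lock_sock()/release_sock() pair rather
than the bh_ variants. A hedged sketch of the usual pattern
(sock_set_lowat is a hypothetical helper, shown only to illustrate the
renamed sk_lock fields in use):

static int sock_set_lowat(struct sock *sk, int val)
{
	lock_sock(sk);		/* may sleep; sets sk_lock.owner */
	sk->sk_rcvlowat = val;
	release_sock(sk);	/* also drains the backlog queue */
	return 0;
}
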
extern struct sock * sk_alloc(int family, int priority, int zero_it,
kmem_cache_t *slab);
@@ -547,13 +573,13 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
if (err)
return err;
- if (sk->filter) {
+ if (sk->sk_filter) {
struct sk_filter *filter;
if (needlock)
bh_lock_sock(sk);
- filter = sk->filter;
+ filter = sk->sk_filter;
if (filter) {
int pkt_len = sk_run_filter(skb, filter->insns,
filter->len);
@@ -581,7 +607,7 @@ static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
unsigned int size = sk_filter_len(fp);
- atomic_sub(size, &sk->omem_alloc);
+ atomic_sub(size, &sk->sk_omem_alloc);
if (atomic_dec_and_test(&fp->refcnt))
kfree(fp);
@@ -590,7 +616,7 @@ static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
atomic_inc(&fp->refcnt);
- atomic_add(sk_filter_len(fp), &sk->omem_alloc);
+ atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
/*
@@ -626,7 +652,7 @@ static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
static inline void sock_hold(struct sock *sk)
{
- atomic_inc(&sk->refcnt);
+ atomic_inc(&sk->sk_refcnt);
}
/* Ungrab socket in the context, which assumes that socket refcnt
@@ -634,13 +660,13 @@ static inline void sock_hold(struct sock *sk)
*/
static inline void __sock_put(struct sock *sk)
{
- atomic_dec(&sk->refcnt);
+ atomic_dec(&sk->sk_refcnt);
}
/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
- if (atomic_dec_and_test(&sk->refcnt))
+ if (atomic_dec_and_test(&sk->sk_refcnt))
sk_free(sk);
}
@@ -653,29 +679,29 @@ static inline void sock_put(struct sock *sk)
*/
static inline void sock_orphan(struct sock *sk)
{
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
sock_set_flag(sk, SOCK_DEAD);
- sk->socket = NULL;
- sk->sleep = NULL;
- write_unlock_bh(&sk->callback_lock);
+ sk->sk_socket = NULL;
+ sk->sk_sleep = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
}
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
- write_lock_bh(&sk->callback_lock);
- sk->sleep = &parent->wait;
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_sleep = &parent->wait;
parent->sk = sk;
- sk->socket = parent;
- write_unlock_bh(&sk->callback_lock);
+ sk->sk_socket = parent;
+ write_unlock_bh(&sk->sk_callback_lock);
}
static inline int sock_i_uid(struct sock *sk)
{
int uid;
- read_lock(&sk->callback_lock);
- uid = sk->socket ? SOCK_INODE(sk->socket)->i_uid : 0;
- read_unlock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
+ uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+ read_unlock(&sk->sk_callback_lock);
return uid;
}
@@ -683,16 +709,16 @@ static inline unsigned long sock_i_ino(struct sock *sk)
{
unsigned long ino;
- read_lock(&sk->callback_lock);
- ino = sk->socket ? SOCK_INODE(sk->socket)->i_ino : 0;
- read_unlock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
+ ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+ read_unlock(&sk->sk_callback_lock);
return ino;
}
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
- return sk->dst_cache;
+ return sk->sk_dst_cache;
}
static inline struct dst_entry *
@@ -700,11 +726,11 @@ sk_dst_get(struct sock *sk)
{
struct dst_entry *dst;
- read_lock(&sk->dst_lock);
- dst = sk->dst_cache;
+ read_lock(&sk->sk_dst_lock);
+ dst = sk->sk_dst_cache;
if (dst)
dst_hold(dst);
- read_unlock(&sk->dst_lock);
+ read_unlock(&sk->sk_dst_lock);
return dst;
}
@@ -713,17 +739,17 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;
- old_dst = sk->dst_cache;
- sk->dst_cache = dst;
+ old_dst = sk->sk_dst_cache;
+ sk->sk_dst_cache = dst;
dst_release(old_dst);
}
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
}
static inline void
@@ -731,26 +757,26 @@ __sk_dst_reset(struct sock *sk)
{
struct dst_entry *old_dst;
- old_dst = sk->dst_cache;
- sk->dst_cache = NULL;
+ old_dst = sk->sk_dst_cache;
+ sk->sk_dst_cache = NULL;
dst_release(old_dst);
}
static inline void
sk_dst_reset(struct sock *sk)
{
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
__sk_dst_reset(sk);
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
}
static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
- struct dst_entry *dst = sk->dst_cache;
+ struct dst_entry *dst = sk->sk_dst_cache;
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
- sk->dst_cache = NULL;
+ sk->sk_dst_cache = NULL;
return NULL;
}
@@ -785,14 +811,14 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
sock_hold(sk);
skb->sk = sk;
skb->destructor = sock_wfree;
- atomic_add(skb->truesize, &sk->wmem_alloc);
+ atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
skb->sk = sk;
skb->destructor = sock_rfree;
- atomic_add(skb->truesize, &sk->rmem_alloc);
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}
static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -802,7 +828,8 @@ static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
err = -ENOMEM;
goto out;
}
@@ -817,9 +844,9 @@ static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb->dev = NULL;
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk,skb->len);
+ sk->sk_data_ready(sk, skb->len);
out:
return err;
}
@@ -829,12 +856,13 @@ static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
return -ENOMEM;
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->error_queue,skb);
+ skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk,skb->len);
+ sk->sk_data_ready(sk, skb->len);
return 0;
}
@@ -844,7 +872,7 @@ static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
static inline int sock_error(struct sock *sk)
{
- int err=xchg(&sk->err,0);
+ int err = xchg(&sk->sk_err, 0);
return -err;
}
@@ -852,8 +880,8 @@ static inline unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;
- if (!(sk->shutdown & SEND_SHUTDOWN)) {
- amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+ amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amt < 0)
amt = 0;
}
@@ -862,8 +890,8 @@ static inline unsigned long sock_wspace(struct sock *sk)
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
- if (sk->socket && sk->socket->fasync_list)
- sock_wake_async(sk->socket, how, band);
+ if (sk->sk_socket && sk->sk_socket->fasync_list)
+ sock_wake_async(sk->sk_socket, how, band);
}
#define SOCK_MIN_SNDBUF 2048
@@ -874,7 +902,7 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
*/
static inline int sock_writeable(struct sock *sk)
{
- return atomic_read(&sk->wmem_alloc) < (sk->sndbuf / 2);
+ return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}
static inline int gfp_any(void)
@@ -884,17 +912,17 @@ static inline int gfp_any(void)
static inline long sock_rcvtimeo(struct sock *sk, int noblock)
{
- return noblock ? 0 : sk->rcvtimeo;
+ return noblock ? 0 : sk->sk_rcvtimeo;
}
static inline long sock_sndtimeo(struct sock *sk, int noblock)
{
- return noblock ? 0 : sk->sndtimeo;
+ return noblock ? 0 : sk->sk_sndtimeo;
}
static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
- return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
+ return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}
/* Alas, with timeout socket operations are not restartable.
@@ -908,10 +936,10 @@ static inline int sock_intr_errno(long timeo)
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- if (sk->rcvtstamp)
+ if (sk->sk_rcvtstamp)
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp), &skb->stamp);
else
- sk->stamp = skb->stamp;
+ sk->sk_stamp = skb->stamp;
}
/*
@@ -940,11 +968,11 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
#define SOCK_SLEEP_PRE(sk) { struct task_struct *tsk = current; \
DECLARE_WAITQUEUE(wait, tsk); \
tsk->state = TASK_INTERRUPTIBLE; \
- add_wait_queue((sk)->sleep, &wait); \
+ add_wait_queue((sk)->sk_sleep, &wait); \
release_sock(sk);
#define SOCK_SLEEP_POST(sk) tsk->state = TASK_RUNNING; \
- remove_wait_queue((sk)->sleep, &wait); \
+ remove_wait_queue((sk)->sk_sleep, &wait); \
lock_sock(sk); \
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 10d509e3ecf0..14530760241e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -54,7 +54,7 @@ struct tcp_ehash_bucket {
*
* 1) Sockets bound to different interfaces may share a local port.
* Failing that, goto test 2.
- * 2) If all sockets have sk->reuse set, and none of them are in
+ * 2) If all sockets have sk->sk_reuse set, and none of them are in
* TCP_LISTEN state, the port may be shared.
* Failing that, goto test 3.
* 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
@@ -65,12 +65,12 @@ struct tcp_ehash_bucket {
* The interesting point, is test #2. This is what an FTP server does
* all day. To optimize this case we use a specific flag bit defined
* below. As we add sockets to a bind bucket list, we perform a
- * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
+ * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
* As long as all sockets added to a bind bucket pass this test,
* the flag bit will be set.
* The resulting situation is that tcp_v[46]_verify_bind() can just check
* for this flag bit, if it is set and the socket trying to bind has
- * sk->reuse set, we don't even have to walk the owners list at all,
+ * sk->sk_reuse set, we don't even have to walk the owners list at all,
* we return that it is ok to bind this socket to the requested local port.
*
* Sounds like a lot of work, but it is worth it. In a more naive
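
(The comment above is cut short by the hunk boundary.) The flag bit it
refers to lives in the bind bucket; the rule can be sketched roughly as
follows, under the assumption that the bucket carries a fastreuse field
as in contemporary kernels (simplified, not verbatim from this patch):

/* Maintained as sockets are added to the bucket: the bit survives
 * only while every owner passes test #2 from the comment above. */
if (tb->fastreuse &&
    !(newsk->sk_reuse && newsk->sk_state != TCP_LISTEN))
	tb->fastreuse = 0;

/* At bind() time a set bit short-circuits the owner walk entirely. */
if (tb->fastreuse && sk->sk_reuse && sk->sk_state != TCP_LISTEN)
	goto success;
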
@@ -97,7 +97,7 @@ extern struct tcp_hashinfo {
/* This is for sockets with full identity only. Sockets here will
* always be without wildcards and will have the following invariant:
*
- * TCP_ESTABLISHED <= sk->state < TCP_CLOSE
+ * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
*
* First half of the table is for sockets not in TIME_WAIT, second half
* is for TIME_WAIT sockets only.
@@ -165,46 +165,45 @@ extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
* without violating the protocol specification.
*/
struct tcp_tw_bucket {
- /* These _must_ match the beginning of struct sock precisely.
- * XXX Yes I know this is gross, but I'd have to edit every single
- * XXX networking file if I created a "struct sock_header". -DaveM
+ /*
+ * Now struct sock also uses sock_common, so please just
+ * don't add anything before this first member (__tw_common) --acme
*/
- unsigned short family;
- volatile unsigned char state; /* Connection state */
- unsigned char reuse; /* SO_REUSEADDR setting */
- int bound_dev_if;
- /* Main hash linkage for various protocol lookup tables. */
- struct sock *next;
- struct sock **pprev;
- struct sock *bind_next;
- struct sock **bind_pprev;
- atomic_t refcnt;
- /* End of struct sock/struct tcp_tw_bucket shared layout */
- volatile unsigned char substate;
- unsigned char rcv_wscale;
- __u16 sport;
+ struct sock_common __tw_common;
+#define tw_family __tw_common.skc_family
+#define tw_state __tw_common.skc_state
+#define tw_reuse __tw_common.skc_reuse
+#define tw_bound_dev_if __tw_common.skc_bound_dev_if
+#define tw_next __tw_common.skc_next
+#define tw_pprev __tw_common.skc_pprev
+#define tw_bind_next __tw_common.skc_bind_next
+#define tw_bind_pprev __tw_common.skc_bind_pprev
+#define tw_refcnt __tw_common.skc_refcnt
+ volatile unsigned char tw_substate;
+ unsigned char tw_rcv_wscale;
+ __u16 tw_sport;
/* Socket demultiplex comparisons on incoming packets. */
/* these five are in inet_opt */
- __u32 daddr;
- __u32 rcv_saddr;
- __u16 dport;
- __u16 num;
+ __u32 tw_daddr;
+ __u32 tw_rcv_saddr;
+ __u16 tw_dport;
+ __u16 tw_num;
/* And these are ours. */
- int hashent;
- int timeout;
- __u32 rcv_nxt;
- __u32 snd_nxt;
- __u32 rcv_wnd;
- __u32 ts_recent;
- long ts_recent_stamp;
- unsigned long ttd;
- struct tcp_bind_bucket *tb;
- struct tcp_tw_bucket *next_death;
- struct tcp_tw_bucket **pprev_death;
+ int tw_hashent;
+ int tw_timeout;
+ __u32 tw_rcv_nxt;
+ __u32 tw_snd_nxt;
+ __u32 tw_rcv_wnd;
+ __u32 tw_ts_recent;
+ long tw_ts_recent_stamp;
+ unsigned long tw_ttd;
+ struct tcp_bind_bucket *tw_tb;
+ struct tcp_tw_bucket *tw_next_death;
+ struct tcp_tw_bucket **tw_pprev_death;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct in6_addr v6_daddr;
- struct in6_addr v6_rcv_saddr;
+ struct in6_addr tw_v6_daddr;
+ struct in6_addr tw_v6_rcv_saddr;
#endif
};
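The rewrite above is the heart of this patch: tcp_tw_bucket no longer
mirrors the head of struct sock field by field, it embeds the same struct
sock_common and exposes tw_-prefixed accessors, so the shared layout is
enforced by the compiler instead of by a comment. A standalone toy model of
the shared-prefix idiom, compilable on its own (every name in it is
invented for illustration):

#include <stdio.h>

struct common { unsigned short family; unsigned char state; };
struct sock_toy { struct common c; int sndbuf; };	/* "full socket" */
struct tw_toy   { struct common c; int timeout; };	/* "tw bucket" */

static void print_common(void *obj)
{
	/* Valid for either type: the common header is first in both. */
	struct common *c = obj;

	printf("family=%u state=%u\n", (unsigned)c->family,
	       (unsigned)c->state);
}

int main(void)
{
	struct sock_toy sk = { { 2, 1 }, 65536 };
	struct tw_toy   tw = { { 2, 6 }, 60 };

	print_common(&sk);
	print_common(&tw);
	return 0;
}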
@@ -214,7 +213,7 @@ extern kmem_cache_t *tcp_timewait_cachep;
static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
- if (atomic_dec_and_test(&tw->refcnt)) {
+ if (atomic_dec_and_test(&tw->tw_refcnt)) {
#ifdef INET_REFCNT_DEBUG
printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
@@ -249,31 +248,31 @@ extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- (((*((__u64 *)&(tcptw_sk(__sk)->daddr)))== (__cookie)) && \
- ((*((__u32 *)&(tcptw_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
+ ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
((inet_sk(__sk)->daddr == (__saddr)) && \
(inet_sk(__sk)->rcv_saddr == (__daddr)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- ((tcptw_sk(__sk)->daddr == (__saddr)) && \
- (tcptw_sk(__sk)->rcv_saddr == (__daddr)) && \
- ((*((__u32 *)&(tcptw_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
+ (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
+ ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
(((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- ((__sk)->family == AF_INET6) && \
+ ((__sk)->sk_family == AF_INET6) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr)) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
- (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
+ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
/* These can have wildcards, don't try too hard. */
static __inline__ int tcp_lhashfn(unsigned short num)
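On 64-bit machines the TCP_V4_ADDR_COOKIE form above folds the two 32-bit
addresses into one __u64, so a demux match costs a single wide compare plus
the ports word. A standalone illustration of the trick; it uses memcpy
instead of the kernel's direct cast purely to keep the toy free of aliasing
concerns:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct addr_pair { uint32_t daddr, saddr; };	/* adjacent 32-bit fields */

static int match64(const struct addr_pair *p, uint32_t daddr, uint32_t saddr)
{
	struct addr_pair want = { daddr, saddr };
	uint64_t cookie, have;

	memcpy(&cookie, &want, sizeof(cookie));
	memcpy(&have, p, sizeof(have));
	return have == cookie;			/* one compare, not two */
}

int main(void)
{
	struct addr_pair p = { 0x0100007f, 0x0200007f };

	printf("%d %d\n", match64(&p, 0x0100007f, 0x0200007f),
	       match64(&p, 0x0100007f, 0x0300007f));
	return 0;
}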
@@ -932,7 +931,8 @@ static __inline__ unsigned int tcp_current_mss(struct sock *sk, int large)
{
struct tcp_opt *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
- int mss_now = large && (sk->route_caps&NETIF_F_TSO) && !tp->urg_mode ?
+ int mss_now = large && (sk->sk_route_caps & NETIF_F_TSO) &&
+ !tp->urg_mode ?
tp->mss_cache : tp->mss_cache_std;
if (dst) {
@@ -983,7 +983,7 @@ static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
{
if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
tp->rcv_wnd &&
- atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
tcp_fast_path_on(tp);
}
@@ -1066,9 +1066,9 @@ struct tcp_skb_cb {
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
#define for_retrans_queue(skb, sk, tp) \
- for (skb = (sk)->write_queue.next; \
+ for (skb = (sk)->sk_write_queue.next; \
(skb != (tp)->send_head) && \
- (skb != (struct sk_buff *)&(sk)->write_queue); \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
skb=skb->next)
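for_retrans_queue() walks sk->sk_write_queue from its head up to, but not
including, tp->send_head: exactly the segments already transmitted and
still unacknowledged. A usage sketch (illustrative, not from this patch):

	struct sk_buff *skb;

	for_retrans_queue(skb, sk, tp) {
		/* every skb seen here has been sent at least once; segments
		 * at and past tp->send_head still await first transmission */
	}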
@@ -1080,12 +1080,12 @@ struct tcp_skb_cb {
*/
static inline int tcp_min_write_space(struct sock *sk)
{
- return sk->wmem_queued/2;
+ return sk->sk_wmem_queued / 2;
}
static inline int tcp_wspace(struct sock *sk)
{
- return sk->sndbuf - sk->wmem_queued;
+ return sk->sk_sndbuf - sk->sk_wmem_queued;
}
@@ -1267,7 +1267,7 @@ static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp
static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
{
- return (skb->next == (struct sk_buff*)&sk->write_queue);
+ return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}
/* Push out any pending frames which were held back due to
@@ -1365,19 +1365,19 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
if (!sysctl_tcp_low_latency && tp->ucopy.task) {
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
- if (tp->ucopy.memory > sk->rcvbuf) {
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
struct sk_buff *skb1;
if (sock_owned_by_user(sk)) BUG();
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
- sk->backlog_rcv(sk, skb1);
+ sk->sk_backlog_rcv(sk, skb1);
NET_INC_STATS_BH(TCPPrequeueDropped);
}
tp->ucopy.memory = 0;
} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
- wake_up_interruptible(sk->sleep);
+ wake_up_interruptible(sk->sk_sleep);
if (!tcp_ack_scheduled(tp))
tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
}
@@ -1399,7 +1399,7 @@ static char *statename[]={
static __inline__ void tcp_set_state(struct sock *sk, int state)
{
- int oldstate = sk->state;
+ int oldstate = sk->sk_state;
switch (state) {
case TCP_ESTABLISHED:
@@ -1411,8 +1411,8 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
TCP_INC_STATS(TcpEstabResets);
- sk->prot->unhash(sk);
- if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
+ sk->sk_prot->unhash(sk);
+ if (sk->sk_prev && !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
tcp_put_port(sk);
/* fall through */
default:
@@ -1423,7 +1423,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
/* Change state AFTER socket is unhashed to avoid closed
* socket sitting in hash tables.
*/
- sk->state = state;
+ sk->sk_state = state;
#ifdef STATE_TRACE
SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
@@ -1435,10 +1435,10 @@ static __inline__ void tcp_done(struct sock *sk)
tcp_set_state(sk, TCP_CLOSE);
tcp_clear_xmit_timers(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
else
tcp_destroy_sock(sk);
}
@@ -1588,27 +1588,28 @@ static inline int tcp_win_from_space(int space)
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(struct sock *sk)
{
- return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
+ return tcp_win_from_space(sk->sk_rcvbuf -
+ atomic_read(&sk->sk_rmem_alloc));
}
static inline int tcp_full_space( struct sock *sk)
{
- return tcp_win_from_space(sk->rcvbuf);
+ return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_acceptq_removed(struct sock *sk)
{
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
}
static inline void tcp_acceptq_added(struct sock *sk)
{
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
}
static inline int tcp_acceptq_is_full(struct sock *sk)
{
- return sk->ack_backlog > sk->max_ack_backlog;
+ return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
@@ -1711,15 +1712,15 @@ static __inline__ void tcp_openreq_init(struct open_request *req,
static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
{
tcp_sk(sk)->queue_shrunk = 1;
- sk->wmem_queued -= skb->truesize;
- sk->forward_alloc += skb->truesize;
+ sk->sk_wmem_queued -= skb->truesize;
+ sk->sk_forward_alloc += skb->truesize;
__kfree_skb(skb);
}
static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
- sk->wmem_queued += skb->truesize;
- sk->forward_alloc -= skb->truesize;
+ sk->sk_wmem_queued += skb->truesize;
+ sk->sk_forward_alloc -= skb->truesize;
}
extern void __tcp_mem_reclaim(struct sock *sk);
@@ -1727,7 +1728,7 @@ extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
static inline void tcp_mem_reclaim(struct sock *sk)
{
- if (sk->forward_alloc >= TCP_MEM_QUANTUM)
+ if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM)
__tcp_mem_reclaim(sk);
}
@@ -1741,9 +1742,9 @@ static inline void tcp_enter_memory_pressure(void)
static inline void tcp_moderate_sndbuf(struct sock *sk)
{
- if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
- sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
- sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+ sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+ sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
}
}
@@ -1753,7 +1754,7 @@ static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem,
if (skb) {
skb->truesize += mem;
- if (sk->forward_alloc >= (int)skb->truesize ||
+ if (sk->sk_forward_alloc >= (int)skb->truesize ||
tcp_mem_schedule(sk, skb->truesize, 0)) {
skb_reserve(skb, MAX_TCP_HEADER);
return skb;
@@ -1773,9 +1774,9 @@ static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
static inline struct page * tcp_alloc_page(struct sock *sk)
{
- if (sk->forward_alloc >= (int)PAGE_SIZE ||
+ if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
- struct page *page = alloc_pages(sk->allocation, 0);
+ struct page *page = alloc_pages(sk->sk_allocation, 0);
if (page)
return page;
}
@@ -1788,7 +1789,7 @@ static inline void tcp_writequeue_purge(struct sock *sk)
{
struct sk_buff *skb;
- while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
+ while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
tcp_free_skb(sk, skb);
tcp_mem_reclaim(sk);
}
@@ -1799,8 +1800,8 @@ static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
skb->sk = sk;
skb->destructor = tcp_rfree;
- atomic_add(skb->truesize, &sk->rmem_alloc);
- sk->forward_alloc -= skb->truesize;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk->sk_forward_alloc -= skb->truesize;
}
extern void tcp_listen_wlock(void);
@@ -1870,10 +1871,10 @@ static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
{
- sk->route_caps = dst->dev->features;
- if (sk->route_caps & NETIF_F_TSO) {
- if (sk->no_largesend || dst->header_len)
- sk->route_caps &= ~NETIF_F_TSO;
+ sk->sk_route_caps = dst->dev->features;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ if (sk->sk_no_largesend || dst->header_len)
+ sk->sk_route_caps &= ~NETIF_F_TSO;
}
}
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index 3785af8bbe74..06105757262d 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -31,10 +31,10 @@ static __inline__ void
TCP_ECN_send_syn(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
tp->ecn_flags = 0;
- if (sysctl_tcp_ecn && !(sk->route_caps&NETIF_F_TSO)) {
+ if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) {
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
tp->ecn_flags = TCP_ECN_OK;
- sk->no_largesend = 1;
+ sk->sk_no_largesend = 1;
}
}
diff --git a/include/net/udp.h b/include/net/udp.h
index 766330ad75b0..fd43c169bcbe 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -43,10 +43,9 @@ static inline int udp_lport_inuse(u16 num)
{
struct sock *sk = udp_hash[num & (UDP_HTABLE_SIZE - 1)];
- for(; sk != NULL; sk = sk->next) {
+ for (; sk; sk = sk->sk_next)
if (inet_sk(sk)->num == num)
return 1;
- }
return 0;
}
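A sketch of how an autobind path might lean on that predicate when hunting
for a free local port (simplified: the real allocator also honours the
configured port range, a rover, and the UDP hash lock):

static unsigned short pick_udp_port_sketch(unsigned short start)
{
	unsigned short port = start;

	while (udp_lport_inuse(port))
		port++;		/* no range/wraparound handling here */
	return port;
}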
diff --git a/include/net/x25.h b/include/net/x25.h
index 98fb713f3121..9fa00b961475 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -149,7 +149,7 @@ struct x25_opt {
unsigned long vc_facil_mask; /* inc_call facilities mask */
};
-#define x25_sk(__sk) ((struct x25_opt *)(__sk)->protinfo)
+#define x25_sk(__sk) ((struct x25_opt *)(__sk)->sk_protinfo)
/* af_x25.c */
extern int sysctl_x25_restart_request_timeout;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1a00ffc2e363..21542c6a1f17 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -78,7 +78,7 @@ extern struct semaphore xfrm_cfg_sem;
We add genid to each dst plus pointer to genid of raw IP route,
pmtu disc will update pmtu on raw IP route and increase its genid.
dst_check() will see this for top level and trigger resyncing
- metrics. Plus, it will be made via sk->dst_cache. Solved.
+ metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
*/
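A hedged sketch of the revalidation that note relies on: the socket's
cached route is checked against its generation cookie and dropped once PMTU
discovery has bumped the genid (refresh_route is a hypothetical helper
standing in for the protocol's rerouting logic):

	struct dst_entry *dst = __sk_dst_check(sk, cookie);

	if (!dst)			/* genid moved on: stale metrics */
		dst = refresh_route(sk);	/* hypothetical */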
/* Full description of state of transformer. */
@@ -586,7 +586,7 @@ extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsi
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
- if (sk && sk->policy[XFRM_POLICY_IN])
+ if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, dir, skb, family);
return !xfrm_policy_list[dir] ||
@@ -628,7 +628,7 @@ extern int __xfrm_sk_clone_policy(struct sock *sk);
static inline int xfrm_sk_clone_policy(struct sock *sk)
{
- if (unlikely(sk->policy[0] || sk->policy[1]))
+ if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
return __xfrm_sk_clone_policy(sk);
return 0;
}
@@ -637,13 +637,13 @@ extern void __xfrm_sk_free_policy(struct xfrm_policy *, int dir);
static inline void xfrm_sk_free_policy(struct sock *sk)
{
- if (unlikely(sk->policy[0] != NULL)) {
- __xfrm_sk_free_policy(sk->policy[0], 0);
- sk->policy[0] = NULL;
+ if (unlikely(sk->sk_policy[0] != NULL)) {
+ __xfrm_sk_free_policy(sk->sk_policy[0], 0);
+ sk->sk_policy[0] = NULL;
}
- if (unlikely(sk->policy[1] != NULL)) {
- __xfrm_sk_free_policy(sk->policy[1], 1);
- sk->policy[1] = NULL;
+ if (unlikely(sk->sk_policy[1] != NULL)) {
+ __xfrm_sk_free_policy(sk->sk_policy[1], 1);
+ sk->sk_policy[1] = NULL;
}
}
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 664baeb8540c..a3c499e67192 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -630,7 +630,7 @@ out_unlock:
sendit:
if (skb->sk)
- skb->priority = skb->sk->priority;
+ skb->priority = skb->sk->sk_priority;
dev_queue_xmit(skb);
sent:
return 1;
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index a21585280eba..4f289c7f2187 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -144,7 +144,7 @@ static __inline__ struct sock *atalk_get_socket_idx(loff_t pos)
{
struct sock *s;
- for (s = atalk_sockets; pos && s; s = s->next)
+ for (s = atalk_sockets; pos && s; s = s->sk_next)
--pos;
return s;
@@ -170,7 +170,7 @@ static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
i = v;
- i = i->next;
+ i = i->sk_next;
out:
return i;
}
@@ -196,10 +196,11 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
"%02X %d\n",
- s->type, ntohs(at->src_net), at->src_node, at->src_port,
+ s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
ntohs(at->dest_net), at->dest_node, at->dest_port,
- atomic_read(&s->wmem_alloc), atomic_read(&s->rmem_alloc),
- s->state, SOCK_INODE(s->socket)->i_uid);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
out:
return 0;
}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 69d5988de779..b962e5d847fa 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -92,11 +92,11 @@ rwlock_t atalk_sockets_lock = RW_LOCK_UNLOCKED;
static inline void atalk_insert_socket(struct sock *sk)
{
write_lock_bh(&atalk_sockets_lock);
- sk->next = atalk_sockets;
- if (sk->next)
- atalk_sockets->pprev = &sk->next;
+ sk->sk_next = atalk_sockets;
+ if (sk->sk_next)
+ atalk_sockets->sk_pprev = &sk->sk_next;
atalk_sockets = sk;
- sk->pprev = &atalk_sockets;
+ sk->sk_pprev = &atalk_sockets;
write_unlock_bh(&atalk_sockets_lock);
}
#endif
@@ -104,11 +104,11 @@ static inline void atalk_insert_socket(struct sock *sk)
static inline void atalk_remove_socket(struct sock *sk)
{
write_lock_bh(&atalk_sockets_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
}
write_unlock_bh(&atalk_sockets_lock);
}
@@ -119,7 +119,7 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
struct sock *s;
read_lock_bh(&atalk_sockets_lock);
- for (s = atalk_sockets; s; s = s->next) {
+ for (s = atalk_sockets; s; s = s->sk_next) {
struct atalk_sock *at = at_sk(s);
if (to->sat_port != at->src_port)
@@ -165,7 +165,7 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
struct sock *s;
write_lock_bh(&atalk_sockets_lock);
- for (s = atalk_sockets; s; s = s->next) {
+ for (s = atalk_sockets; s; s = s->sk_next) {
struct atalk_sock *at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
@@ -176,11 +176,11 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
if (!s) {
/* Wheee, it's free, assign and insert. */
- sk->next = atalk_sockets;
- if (sk->next)
- atalk_sockets->pprev = &sk->next;
+ sk->sk_next = atalk_sockets;
+ if (sk->sk_next)
+ atalk_sockets->sk_pprev = &sk->sk_next;
atalk_sockets = sk;
- sk->pprev = &atalk_sockets;
+ sk->sk_pprev = &atalk_sockets;
}
write_unlock_bh(&atalk_sockets_lock);
@@ -191,29 +191,29 @@ static void atalk_destroy_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
- if (!atomic_read(&sk->wmem_alloc) &&
- !atomic_read(&sk->rmem_alloc) && sock_flag(sk, SOCK_DEAD))
+ if (!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc) && sock_flag(sk, SOCK_DEAD))
sock_put(sk);
else {
- sk->timer.expires = jiffies + SOCK_DESTROY_TIME;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
+ add_timer(&sk->sk_timer);
}
}
static inline void atalk_destroy_socket(struct sock *sk)
{
atalk_remove_socket(sk);
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
- if (!atomic_read(&sk->wmem_alloc) &&
- !atomic_read(&sk->rmem_alloc) && sock_flag(sk, SOCK_DEAD))
+ if (!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc) && sock_flag(sk, SOCK_DEAD))
sock_put(sk);
else {
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + SOCK_DESTROY_TIME;
- sk->timer.function = atalk_destroy_timer;
- sk->timer.data = (unsigned long) sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
+ sk->sk_timer.function = atalk_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
}
}
@@ -992,7 +992,7 @@ static int atalk_create(struct socket *sock, int protocol)
sock->ops = &atalk_dgram_ops;
sock_init_data(sock, sk);
/* Checksums on by default */
- sk->zapped = 1;
+ sk->sk_zapped = 1;
out:
return rc;
outsk:
@@ -1007,7 +1007,7 @@ static int atalk_release(struct socket *sock)
if (sk) {
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
sock->sk = NULL;
@@ -1036,7 +1036,7 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
for (sat->sat_port = ATPORT_RESERVED;
sat->sat_port < ATPORT_LAST;
sat->sat_port++) {
- for (s = atalk_sockets; s; s = s->next) {
+ for (s = atalk_sockets; s; s = s->sk_next) {
struct atalk_sock *at = at_sk(s);
if (at->src_net == sat->sat_addr.s_net &&
@@ -1046,11 +1046,11 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
}
/* Wheee, it's free, assign and insert. */
- sk->next = atalk_sockets;
- if (sk->next)
- atalk_sockets->pprev = &sk->next;
+ sk->sk_next = atalk_sockets;
+ if (sk->sk_next)
+ atalk_sockets->sk_pprev = &sk->sk_next;
atalk_sockets = sk;
- sk->pprev = &atalk_sockets;
+ sk->sk_pprev = &atalk_sockets;
at_sk(sk)->src_port = sat->sat_port;
retval = 0;
goto out;
@@ -1079,7 +1079,7 @@ static int atalk_autobind(struct sock *sk)
n = atalk_pick_and_bind_port(sk, &sat);
if (!n)
- sk->zapped = 0;
+ sk->sk_zapped = 0;
out:
return n;
}
@@ -1091,7 +1091,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
- if (!sk->zapped || addr_len != sizeof(struct sockaddr_at))
+ if (!sk->sk_zapped || addr_len != sizeof(struct sockaddr_at))
return -EINVAL;
if (addr->sat_family != AF_APPLETALK)
@@ -1126,7 +1126,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EADDRINUSE;
}
- sk->zapped = 0;
+ sk->sk_zapped = 0;
return 0;
}
@@ -1138,7 +1138,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
struct atalk_sock *at = at_sk(sk);
struct sockaddr_at *addr;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(*addr))
@@ -1161,7 +1161,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
#endif
}
- if (sk->zapped)
+ if (sk->sk_zapped)
if (atalk_autobind(sk) < 0)
return -EBUSY;
@@ -1172,8 +1172,8 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
at->dest_net = addr->sat_addr.s_net;
at->dest_node = addr->sat_addr.s_node;
- sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sock->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
return 0;
}
@@ -1188,14 +1188,14 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
- if (sk->zapped)
+ if (sk->sk_zapped)
if (atalk_autobind(sk) < 0)
return -ENOBUFS;
*uaddr_len = sizeof(struct sockaddr_at);
if (peer) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sat.sat_addr.s_net = at->dest_net;
@@ -1506,7 +1506,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
return -EMSGSIZE;
if (usat) {
- if (sk->zapped)
+ if (sk->sk_zapped)
if (atalk_autobind(sk) < 0)
return -EBUSY;
@@ -1524,7 +1524,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
#endif
}
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
usat = &local_satalk;
usat->sat_family = AF_APPLETALK;
@@ -1599,7 +1599,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
return -EFAULT;
}
- if (sk->no_check == 1)
+ if (sk->sk_no_check == 1)
ddp->deh_sum = 0;
else
ddp->deh_sum = atalk_checksum(ddp, len + sizeof(*ddp));
@@ -1661,7 +1661,7 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
ddp = ddp_hdr(skb);
*((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
- if (sk->type == SOCK_RAW) {
+ if (sk->sk_type == SOCK_RAW) {
copied = ddphv.deh_len;
if (copied > size) {
copied = size;
@@ -1705,7 +1705,8 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
/* Protocol layer */
case TIOCOUTQ: {
- long amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ long amount = sk->sk_sndbuf -
+ atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
@@ -1717,7 +1718,7 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
* These two are safe on a single CPU system as only
* user tasks fiddle here
*/
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
long amount = 0;
if (skb)
@@ -1729,9 +1730,9 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (!sk)
break;
rc = -ENOENT;
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
break;
- rc = copy_to_user((void *)arg, &sk->stamp,
+ rc = copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)) ? -EFAULT : 0;
break;
/* Routing */
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index d4c801979e1f..9f6c4703c5a9 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -16,7 +16,8 @@
int atm_charge(struct atm_vcc *vcc,int truesize)
{
atm_force_charge(vcc,truesize);
- if (atomic_read(&vcc->sk->rmem_alloc) <= vcc->sk->rcvbuf) return 1;
+ if (atomic_read(&vcc->sk->sk_rmem_alloc) <= vcc->sk->sk_rcvbuf)
+ return 1;
atm_return(vcc,truesize);
atomic_inc(&vcc->stats->rx_drop);
return 0;
@@ -29,11 +30,12 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
int guess = atm_guess_pdu2truesize(pdu_size);
atm_force_charge(vcc,guess);
- if (atomic_read(&vcc->sk->rmem_alloc) <= vcc->sk->rcvbuf) {
+ if (atomic_read(&vcc->sk->sk_rmem_alloc) <= vcc->sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(pdu_size,gfp_flags);
if (skb) {
- atomic_add(skb->truesize-guess,&vcc->sk->rmem_alloc);
+ atomic_add(skb->truesize-guess,
+ &vcc->sk->sk_rmem_alloc);
return skb;
}
}
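atm_alloc_charge() above charges an up-front guess against the receive
account and then corrects it by the difference once the real skb->truesize
is known. A standalone toy of that charge-then-correct pattern (invented
names, plain int instead of atomic_t):

#include <stdio.h>

static int rmem_alloc;	/* stands in for vcc->sk->sk_rmem_alloc */

static int alloc_charged(int guess, int actual, int rcvbuf)
{
	rmem_alloc += guess;			/* atm_force_charge() */
	if (rmem_alloc <= rcvbuf) {
		rmem_alloc += actual - guess;	/* fix up the estimate */
		return 1;			/* "skb allocated" */
	}
	rmem_alloc -= guess;			/* atm_return() */
	return 0;				/* over rcvbuf: drop */
}

int main(void)
{
	printf("%d rmem=%d\n", alloc_charged(512, 640, 4096), rmem_alloc);
	return 0;
}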
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 1dd4ba2a7ed4..4b6f2440a549 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -188,7 +188,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
dev_kfree_skb(skb);
return 0;
}
- atomic_add(skb->truesize, &atmvcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &atmvcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
brdev->stats.tx_packets++;
brdev->stats.tx_bytes += skb->len;
@@ -551,7 +551,7 @@ Note: we do not have explicit unassign, but look at _push()
barrier();
atmvcc->push = br2684_push;
skb_queue_head_init(&copy);
- skb_migrate(&atmvcc->sk->receive_queue, &copy);
+ skb_migrate(&atmvcc->sk->sk_receive_queue, &copy);
while ((skb = skb_dequeue(&copy))) {
BRPRIV(skb->dev)->stats.rx_bytes -= skb->len;
BRPRIV(skb->dev)->stats.rx_packets--;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 5a89132fec8c..2071c3ad3cd1 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -66,7 +66,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type,int itf,unsigned long ip)
ctrl->itf_num = itf;
ctrl->ip = ip;
atm_force_charge(atmarpd,skb->truesize);
- skb_queue_tail(&atmarpd->sk->receive_queue,skb);
+ skb_queue_tail(&atmarpd->sk->sk_receive_queue, skb);
wake_up(&atmarpd->sleep);
return 0;
}
@@ -435,7 +435,7 @@ static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
memcpy(here,llc_oui,sizeof(llc_oui));
((u16 *) here)[3] = skb->protocol;
}
- atomic_add(skb->truesize,&vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = vcc->atm_options;
entry->vccs->last_use = jiffies;
DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n",skb,vcc,vcc->dev);
@@ -493,7 +493,7 @@ static int clip_mkip(struct atm_vcc *vcc,int timeout)
vcc->push = clip_push;
vcc->pop = clip_pop;
skb_queue_head_init(&copy);
- skb_migrate(&vcc->sk->receive_queue,&copy);
+ skb_migrate(&vcc->sk->sk_receive_queue, &copy);
/* re-process everything received between connection setup and MKIP */
while ((skb = skb_dequeue(&copy)))
if (!clip_devs) {
@@ -699,10 +699,10 @@ static void atmarpd_close(struct atm_vcc *vcc)
barrier();
unregister_inetaddr_notifier(&clip_inet_notifier);
unregister_netdevice_notifier(&clip_dev_notifier);
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
printk(KERN_ERR "atmarpd_close: closing with requests "
"pending\n");
- skb_queue_purge(&vcc->sk->receive_queue);
+ skb_queue_purge(&vcc->sk->sk_receive_queue);
DPRINTK("(done)\n");
module_put(THIS_MODULE);
}
diff --git a/net/atm/common.c b/net/atm/common.c
index 22b01324c0ac..d449fd9aff87 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -161,14 +161,16 @@ static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size)
{
struct sk_buff *skb;
- if (atomic_read(&vcc->sk->wmem_alloc) && !atm_may_send(vcc,size)) {
+ if (atomic_read(&vcc->sk->sk_wmem_alloc) && !atm_may_send(vcc, size)) {
DPRINTK("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
- atomic_read(&vcc->sk->wmem_alloc),size,vcc->sk->sndbuf);
+ atomic_read(&vcc->sk->sk_wmem_alloc), size,
+ vcc->sk->sk_sndbuf);
return NULL;
}
while (!(skb = alloc_skb(size,GFP_KERNEL))) schedule();
- DPRINTK("AlTx %d += %d\n",atomic_read(&vcc->sk->wmem_alloc),skb->truesize);
- atomic_add(skb->truesize, &vcc->sk->wmem_alloc);
+ DPRINTK("AlTx %d += %d\n", atomic_read(&vcc->sk->sk_wmem_alloc),
+ skb->truesize);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
return skb;
}
@@ -188,15 +190,15 @@ int atm_create(struct socket *sock,int protocol,int family)
memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc));
memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
- atomic_set(&vcc->sk->wmem_alloc,0);
- atomic_set(&vcc->sk->rmem_alloc,0);
+ atomic_set(&vcc->sk->sk_wmem_alloc, 0);
+ atomic_set(&vcc->sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
vcc->push_oam = NULL;
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
init_waitqueue_head(&vcc->sleep);
- sk->sleep = &vcc->sleep;
+ sk->sk_sleep = &vcc->sleep;
sock->sk = sk;
return 0;
}
@@ -211,17 +213,17 @@ void atm_release_vcc_sk(struct sock *sk,int free_sk)
if (vcc->dev) {
if (vcc->dev->ops->close) vcc->dev->ops->close(vcc);
if (vcc->push) vcc->push(vcc,NULL); /* atmarpd has no push */
- while ((skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
atm_return(vcc,skb->truesize);
kfree_skb(skb);
}
module_put(vcc->dev->ops->owner);
atm_dev_release(vcc->dev);
- if (atomic_read(&vcc->sk->rmem_alloc))
+ if (atomic_read(&vcc->sk->sk_rmem_alloc))
printk(KERN_WARNING "atm_release_vcc: strange ... "
"rmem_alloc == %d after closing\n",
- atomic_read(&vcc->sk->rmem_alloc));
+ atomic_read(&vcc->sk->sk_rmem_alloc));
bind_vcc(vcc,NULL);
}
@@ -431,7 +433,7 @@ int atm_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
add_wait_queue(&vcc->sleep,&wait);
set_current_state(TASK_INTERRUPTIBLE);
error = 1; /* <= 0 is error */
- while (!(skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while (!(skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
test_bit(ATM_VF_CLOSE,&vcc->flags)) {
error = vcc->reply;
@@ -462,7 +464,8 @@ int atm_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
if (vcc->dev->ops->feedback)
vcc->dev->ops->feedback(vcc,skb,(unsigned long) skb->data,
(unsigned long) buff,eff_len);
- DPRINTK("RcvM %d -= %d\n",atomic_read(&vcc->sk->rmem_alloc),skb->truesize);
+ DPRINTK("RcvM %d -= %d\n", atomic_read(&vcc->sk->sk_rmem_alloc),
+ skb->truesize);
atm_return(vcc,skb->truesize);
error = copy_to_user(buff,skb->data,eff_len) ? -EFAULT : 0;
kfree_skb(skb);
@@ -541,14 +544,15 @@ unsigned int atm_poll(struct file *file,struct socket *sock,poll_table *wait)
vcc = ATM_SD(sock);
poll_wait(file,&vcc->sleep,wait);
mask = 0;
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
test_bit(ATM_VF_CLOSE,&vcc->flags))
mask |= POLLHUP;
if (sock->state != SS_CONNECTING) {
if (vcc->qos.txtp.traffic_class != ATM_NONE &&
- vcc->qos.txtp.max_sdu+atomic_read(&vcc->sk->wmem_alloc) <= vcc->sk->sndbuf)
+ vcc->qos.txtp.max_sdu +
+ atomic_read(&vcc->sk->sk_wmem_alloc) <= vcc->sk->sk_sndbuf)
mask |= POLLOUT | POLLWRNORM;
}
else if (vcc->reply != WAITING) {
@@ -613,8 +617,8 @@ int atm_ioctl(struct socket *sock,unsigned int cmd,unsigned long arg)
ret_val = -EINVAL;
goto done;
}
- ret_val = put_user(vcc->sk->sndbuf-
- atomic_read(&vcc->sk->wmem_alloc),
+ ret_val = put_user(vcc->sk->sk_sndbuf -
+ atomic_read(&vcc->sk->sk_wmem_alloc),
(int *) arg) ? -EFAULT : 0;
goto done;
case SIOCINQ:
@@ -625,7 +629,7 @@ int atm_ioctl(struct socket *sock,unsigned int cmd,unsigned long arg)
ret_val = -EINVAL;
goto done;
}
- skb = skb_peek(&vcc->sk->receive_queue);
+ skb = skb_peek(&vcc->sk->sk_receive_queue);
ret_val = put_user(skb ? skb->len : 0,(int *) arg)
? -EFAULT : 0;
goto done;
@@ -668,11 +672,11 @@ int atm_ioctl(struct socket *sock,unsigned int cmd,unsigned long arg)
kfree(tmp_buf);
goto done;
case SIOCGSTAMP: /* borrowed from IP */
- if (!vcc->sk->stamp.tv_sec) {
+ if (!vcc->sk->sk_stamp.tv_sec) {
ret_val = -ENOENT;
goto done;
}
- ret_val = copy_to_user((void *) arg, &vcc->sk->stamp,
+ ret_val = copy_to_user((void *)arg, &vcc->sk->sk_stamp,
sizeof(struct timeval)) ? -EFAULT : 0;
goto done;
case ATM_SETSC:
@@ -1078,7 +1082,7 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
if (!error) error = adjust_tp(&qos->rxtp,qos->aal);
if (error) return error;
if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP;
- if (vcc->sk->family == AF_ATMPVC)
+ if (vcc->sk->sk_family == AF_ATMPVC)
return vcc->dev->ops->change_qos(vcc,qos,ATM_MF_SET);
return svc_change_qos(vcc,qos);
}
diff --git a/net/atm/lec.c b/net/atm/lec.c
index fd448c28c795..35213ce8966d 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -133,7 +133,7 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
priv = (struct lec_priv *)dev->priv;
atm_force_charge(priv->lecd, skb2->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, skb2);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, skb2);
wake_up(&priv->lecd->sleep);
}
@@ -210,7 +210,7 @@ static __inline__ void
lec_send(struct atm_vcc *vcc, struct sk_buff *skb, struct lec_priv *priv)
{
if (atm_may_send(vcc, skb->len)) {
- atomic_add(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &vcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->vcc = vcc;
ATM_SKB(skb)->atm_options = vcc->atm_options;
priv->stats.tx_packets++;
@@ -406,7 +406,7 @@ lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
int i;
char *tmp; /* FIXME */
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
mesg = (struct atmlec_msg *)skb->data;
tmp = skb->data;
tmp += sizeof(struct atmlec_msg);
@@ -512,7 +512,7 @@ lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
skb2->len = sizeof(struct atmlec_msg);
memcpy(skb2->data, mesg, sizeof(struct atmlec_msg));
atm_force_charge(priv->lecd, skb2->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, skb2);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, skb2);
wake_up(&priv->lecd->sleep);
}
if (f != NULL) br_fdb_put_hook(f);
@@ -541,10 +541,10 @@ lec_atm_close(struct atm_vcc *vcc)
netif_stop_queue(dev);
lec_arp_destroy(priv);
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
printk("%s lec_atm_close: closing with messages pending\n",
dev->name);
- while ((skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
atm_return(vcc, skb->truesize);
dev_kfree_skb(skb);
}
@@ -597,13 +597,13 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN);
atm_force_charge(priv->lecd, skb->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, skb);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, skb);
wake_up(&priv->lecd->sleep);
if (data != NULL) {
DPRINTK("lec: about to send %d bytes of data\n", data->len);
atm_force_charge(priv->lecd, data->truesize);
- skb_queue_tail(&priv->lecd->sk->receive_queue, data);
+ skb_queue_tail(&priv->lecd->sk->sk_receive_queue, data);
wake_up(&priv->lecd->sleep);
}
@@ -685,7 +685,7 @@ lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
#endif /* DUMP_PACKETS > 0 */
if (memcmp(skb->data, lec_ctrl_magic, 4) ==0) { /* Control frame, to daemon*/
DPRINTK("%s: To daemon\n",dev->name);
- skb_queue_tail(&vcc->sk->receive_queue, skb);
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
} else { /* Data frame, queue to protocol handlers */
unsigned char *dst;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 683c5dc1b745..34ae3e956ad9 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -523,7 +523,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
memcpy(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr));
}
- atomic_add(skb->truesize, &entry->shortcut->sk->wmem_alloc);
+ atomic_add(skb->truesize, &entry->shortcut->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
entry->shortcut->send(entry->shortcut, skb);
entry->packets_fwded++;
@@ -667,7 +667,8 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
skb->dev = dev;
if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) {
dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name);
- skb_queue_tail(&vcc->sk->receive_queue, skb); /* Pass control packets to daemon */
+ /* Pass control packets to daemon */
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
return;
}
@@ -847,7 +848,7 @@ static void mpoad_close(struct atm_vcc *vcc)
mpc->in_ops->destroy_cache(mpc);
mpc->eg_ops->destroy_cache(mpc);
- while ( (skb = skb_dequeue(&vcc->sk->receive_queue)) ){
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
@@ -867,7 +868,7 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
struct k_message *mesg = (struct k_message*)skb->data;
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
if (mpc == NULL) {
printk("mpoa: msg_from_mpoad: no mpc found\n");
@@ -944,7 +945,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
skb_put(skb, sizeof(struct k_message));
memcpy(skb->data, mesg, sizeof(struct k_message));
atm_force_charge(mpc->mpoad_vcc, skb->truesize);
- skb_queue_tail(&mpc->mpoad_vcc->sk->receive_queue, skb);
+ skb_queue_tail(&mpc->mpoad_vcc->sk->sk_receive_queue, skb);
wake_up(&mpc->mpoad_vcc->sleep);
return 0;
@@ -1223,7 +1224,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
purge_msg->content.eg_info = entry->ctrl_info;
atm_force_charge(vcc, skb->truesize);
- skb_queue_tail(&vcc->sk->receive_queue, skb);
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
dprintk("mpoa: purge_egress_shortcut: exiting:\n");
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 31d0856f4a66..9e96bca45574 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -231,7 +231,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
kfree_skb(skb);
return 1;
}
- atomic_add(skb->truesize, &ATM_SKB(skb)->vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize, &ATM_SKB(skb)->vcc->sk->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
DPRINTK("(unit %d): atm_skb(%p)->vcc(%p)->dev(%p)\n",
pvcc->chan.unit, skb, ATM_SKB(skb)->vcc,
diff --git a/net/atm/proc.c b/net/atm/proc.c
index fa85563831c6..3627f71a11c5 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -136,7 +136,7 @@ static void atmarp_info(struct net_device *dev,struct atmarp_entry *entry,
unsigned char *ip;
int svc,off,ip_len;
- svc = !clip_vcc || clip_vcc->vcc->sk->family == AF_ATMSVC;
+ svc = !clip_vcc || clip_vcc->vcc->sk->sk_family == AF_ATMSVC;
off = sprintf(buf,"%-6s%-4s%-4s%5ld ",dev->name,svc ? "SVC" : "PVC",
!clip_vcc || clip_vcc->encap ? "LLC" : "NULL",
(jiffies-(clip_vcc ? clip_vcc->last_use : entry->neigh->used))/
@@ -213,7 +213,7 @@ static void vc_info(struct atm_vcc *vcc,char *buf)
if (!vcc->dev) here += sprintf(here,"Unassigned ");
else here += sprintf(here,"%3d %3d %5d ",vcc->dev->number,vcc->vpi,
vcc->vci);
- switch (vcc->sk->family) {
+ switch (vcc->sk->sk_family) {
case AF_ATMPVC:
here += sprintf(here,"PVC");
break;
@@ -221,12 +221,12 @@ static void vc_info(struct atm_vcc *vcc,char *buf)
here += sprintf(here,"SVC");
break;
default:
- here += sprintf(here,"%3d",vcc->sk->family);
+ here += sprintf(here, "%3d", vcc->sk->sk_family);
}
here += sprintf(here," %04lx %5d %7d/%7d %7d/%7d\n",vcc->flags,
vcc->reply,
- atomic_read(&vcc->sk->wmem_alloc),vcc->sk->sndbuf,
- atomic_read(&vcc->sk->rmem_alloc),vcc->sk->rcvbuf);
+ atomic_read(&vcc->sk->sk_wmem_alloc), vcc->sk->sk_sndbuf,
+ atomic_read(&vcc->sk->sk_rmem_alloc), vcc->sk->sk_rcvbuf);
}
@@ -354,7 +354,8 @@ static int atm_pvc_info(loff_t pos,char *buf)
dev = list_entry(p, struct atm_dev, dev_list);
spin_lock_irqsave(&dev->lock, flags);
for (vcc = dev->vccs; vcc; vcc = vcc->next)
- if (vcc->sk->family == PF_ATMPVC && vcc->dev && !left--) {
+ if (vcc->sk->sk_family == PF_ATMPVC &&
+ vcc->dev && !left--) {
pvc_info(vcc,buf,clip_info);
spin_unlock_irqrestore(&dev->lock, flags);
spin_unlock(&atm_dev_lock);
@@ -423,7 +424,7 @@ static int atm_svc_info(loff_t pos,char *buf)
dev = list_entry(p, struct atm_dev, dev_list);
spin_lock_irqsave(&dev->lock, flags);
for (vcc = dev->vccs; vcc; vcc = vcc->next)
- if (vcc->sk->family == PF_ATMSVC && !left--) {
+ if (vcc->sk->sk_family == PF_ATMSVC && !left--) {
svc_info(vcc,buf);
spin_unlock_irqrestore(&dev->lock, flags);
spin_unlock(&atm_dev_lock);
diff --git a/net/atm/raw.c b/net/atm/raw.c
index b32e9929c621..d516efa0acbb 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -28,7 +28,7 @@
void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
{
if (skb) {
- skb_queue_tail(&vcc->sk->receive_queue,skb);
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
wake_up(&vcc->sleep);
}
}
@@ -36,8 +36,9 @@ void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
{
- DPRINTK("APopR (%d) %d -= %d\n",vcc->vci,vcc->sk->wmem_alloc,skb->truesize);
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ DPRINTK("APopR (%d) %d -= %d\n", vcc->vci, vcc->sk->sk_wmem_alloc,
+ skb->truesize);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
dev_kfree_skb_any(skb);
wake_up(&vcc->sleep);
}
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 52c4e11161e4..9849062cea2e 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -60,7 +60,7 @@ static void sigd_put_skb(struct sk_buff *skb)
}
#endif
atm_force_charge(sigd,skb->truesize);
- skb_queue_tail(&sigd->sk->receive_queue,skb);
+ skb_queue_tail(&sigd->sk->sk_receive_queue,skb);
wake_up(&sigd->sleep);
}
@@ -97,7 +97,7 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct atm_vcc *session_vcc;
msg = (struct atmsvc_msg *) skb->data;
- atomic_sub(skb->truesize, &vcc->sk->wmem_alloc);
+ atomic_sub(skb->truesize, &vcc->sk->sk_wmem_alloc);
DPRINTK("sigd_send %d (0x%lx)\n",(int) msg->type,
(unsigned long) msg->vcc);
vcc = *(struct atm_vcc **) &msg->vcc;
@@ -128,12 +128,13 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
case as_indicate:
vcc = *(struct atm_vcc **) &msg->listen_vcc;
DPRINTK("as_indicate!!!\n");
- if (vcc->sk->ack_backlog == vcc->sk->max_ack_backlog) {
+ if (vcc->sk->sk_ack_backlog ==
+ vcc->sk->sk_max_ack_backlog) {
sigd_enq(0,as_reject,vcc,NULL,NULL);
return 0;
}
- vcc->sk->ack_backlog++;
- skb_queue_tail(&vcc->sk->receive_queue,skb);
+ vcc->sk->sk_ack_backlog++;
+ skb_queue_tail(&vcc->sk->sk_receive_queue, skb);
if (vcc->callback) {
DPRINTK("waking vcc->sleep 0x%p\n",
&vcc->sleep);
@@ -197,7 +198,7 @@ void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type,
static void purge_vccs(struct atm_vcc *vcc)
{
while (vcc) {
- if (vcc->sk->family == PF_ATMSVC &&
+ if (vcc->sk->sk_family == PF_ATMSVC &&
!test_bit(ATM_VF_META,&vcc->flags)) {
set_bit(ATM_VF_RELEASED,&vcc->flags);
vcc->reply = -EUNATCH;
@@ -216,9 +217,9 @@ static void sigd_close(struct atm_vcc *vcc)
DPRINTK("sigd_close\n");
sigd = NULL;
- if (skb_peek(&vcc->sk->receive_queue))
+ if (skb_peek(&vcc->sk->sk_receive_queue))
printk(KERN_ERR "sigd_close: closing with requests pending\n");
- skb_queue_purge(&vcc->sk->receive_queue);
+ skb_queue_purge(&vcc->sk->sk_receive_queue);
spin_lock(&atm_dev_lock);
list_for_each(p, &atm_devs) {
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 8f87da6ae8d4..f1401b6a20f7 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -74,7 +74,7 @@ static void svc_disconnect(struct atm_vcc *vcc)
}
/* beware - socket is still in use by atmsigd until the last
as_indicate has been answered */
- while ((skb = skb_dequeue(&vcc->sk->receive_queue))) {
+ while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
DPRINTK("LISTEN REL\n");
sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0);
dev_kfree_skb(skb);
@@ -253,7 +253,8 @@ static int svc_listen(struct socket *sock,int backlog)
remove_wait_queue(&vcc->sleep,&wait);
if (!sigd) return -EUNATCH;
set_bit(ATM_VF_LISTEN,&vcc->flags);
- vcc->sk->max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
+ vcc->sk->sk_max_ack_backlog = backlog > 0 ? backlog :
+ ATM_BACKLOG_DEFAULT;
return vcc->reply;
}
@@ -277,7 +278,8 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
DECLARE_WAITQUEUE(wait,current);
add_wait_queue(&old_vcc->sleep,&wait);
- while (!(skb = skb_dequeue(&old_vcc->sk->receive_queue)) && sigd) {
+ while (!(skb = skb_dequeue(&old_vcc->sk->sk_receive_queue)) &&
+ sigd) {
if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
error = old_vcc->reply;
@@ -306,7 +308,7 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
error = atm_connect(newsock,msg->pvc.sap_addr.itf,
msg->pvc.sap_addr.vpi,msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
- old_vcc->sk->ack_backlog--;
+ old_vcc->sk->sk_ack_backlog--;
if (error) {
sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
&old_vcc->qos,error);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index de5b1bd9f9dd..8048d8fac636 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -174,7 +174,8 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
for (s = ax25_list; s != NULL; s = s->next) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
- if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && s->sk->type == type && s->sk->state == TCP_LISTEN) {
+ if (s->sk && !ax25cmp(&s->source_addr, addr) &&
+ s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
spin_unlock_bh(&ax25_list_lock);
@@ -199,7 +200,9 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
spin_lock_bh(&ax25_list_lock);
for (s = ax25_list; s != NULL; s = s->next) {
- if (s->sk != NULL && ax25cmp(&s->source_addr, my_addr) == 0 && ax25cmp(&s->dest_addr, dest_addr) == 0 && s->sk->type == type) {
+ if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
+ !ax25cmp(&s->dest_addr, dest_addr) &&
+ s->sk->sk_type == type) {
sk = s->sk;
/* XXX Sleeps with spinlock held, use refcounts instead. XXX */
lock_sock(sk);
@@ -223,7 +226,7 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
spin_lock_bh(&ax25_list_lock);
for (s = ax25_list; s != NULL; s = s->next) {
- if (s->sk != NULL && s->sk->type != SOCK_SEQPACKET)
+ if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
continue;
if (s->ax25_dev == NULL)
continue;
@@ -258,7 +261,7 @@ struct sock *ax25_addr_match(ax25_address *addr)
spin_lock_bh(&ax25_list_lock);
for (s = ax25_list; s != NULL; s = s->next) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
- s->sk->type == SOCK_RAW) {
+ s->sk->sk_type == SOCK_RAW) {
sk = s->sk;
lock_sock(sk);
break;
@@ -274,9 +277,9 @@ void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto)
struct sk_buff *copy;
while (sk != NULL) {
- if (sk->type == SOCK_RAW &&
- sk->protocol == proto &&
- atomic_read(&sk->rmem_alloc) <= sk->rcvbuf) {
+ if (sk->sk_type == SOCK_RAW &&
+ sk->sk_protocol == proto &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
return;
@@ -284,7 +287,7 @@ void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto)
kfree_skb(copy);
}
- sk = sk->next;
+ sk = sk->sk_next;
}
}
@@ -322,7 +325,7 @@ void ax25_destroy_socket(ax25_cb *ax25)
ax25_clear_queues(ax25); /* Flush the queues */
if (ax25->sk != NULL) {
- while ((skb = skb_dequeue(&ax25->sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
if (skb->sk != ax25->sk) {
/* A pending connection */
ax25_cb *sax25 = ax25_sk(skb->sk);
@@ -339,8 +342,8 @@ void ax25_destroy_socket(ax25_cb *ax25)
}
if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->wmem_alloc) != 0 ||
- atomic_read(&ax25->sk->rmem_alloc) != 0) {
+ if (atomic_read(&ax25->sk->sk_wmem_alloc) ||
+ atomic_read(&ax25->sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
init_timer(&ax25->timer);
ax25->timer.expires = jiffies + 10 * HZ;
@@ -650,8 +653,9 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (sk->type == SOCK_SEQPACKET &&
- (sock->state != SS_UNCONNECTED || sk->state == TCP_LISTEN)) {
+ if (sk->sk_type == SOCK_SEQPACKET &&
+ (sock->state != SS_UNCONNECTED ||
+ sk->sk_state == TCP_LISTEN)) {
res = -EADDRNOTAVAIL;
break;
}
@@ -771,9 +775,9 @@ static int ax25_listen(struct socket *sock, int backlog)
int res = 0;
lock_sock(sk);
- if (sk->type == SOCK_SEQPACKET && sk->state != TCP_LISTEN) {
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) {
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
goto out;
}
res = -EOPNOTSUPP;
@@ -846,9 +850,9 @@ int ax25_create(struct socket *sock, int protocol)
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = ax25_free_sock;
+ sk->sk_destruct = ax25_free_sock;
sock->ops = &ax25_proto_ops;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
ax25->sk = sk;
@@ -868,7 +872,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
return NULL;
}
- switch (osk->type) {
+ switch (osk->sk_type) {
case SOCK_DGRAM:
break;
case SOCK_SEQPACKET:
@@ -882,17 +886,17 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
sock_init_data(NULL, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = ax25_free_sock;
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
+ sk->sk_destruct = ax25_free_sock;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
oax25 = ax25_sk(osk);
@@ -938,7 +942,7 @@ static int ax25_release(struct socket *sock)
lock_sock(sk);
ax25 = ax25_sk(sk);
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
switch (ax25->state) {
case AX25_STATE_0:
ax25_disconnect(ax25, 0);
@@ -978,9 +982,9 @@ static int ax25_release(struct socket *sock)
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25->state = AX25_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
@@ -989,15 +993,15 @@ static int ax25_release(struct socket *sock)
break;
}
} else {
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
ax25_destroy_socket(ax25);
}
sock->sk = NULL;
- sk->socket = NULL; /* Not used, but we should do this */
+ sk->sk_socket = NULL; /* Not used, but we should do this */
release_sock(sk);
return 0;
@@ -1041,7 +1045,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
lock_sock(sk);
ax25 = ax25_sk(sk);
- if (sk->zapped == 0) {
+ if (!sk->sk_zapped) {
err = -EINVAL;
goto out;
}
@@ -1075,7 +1079,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
done:
ax25_insert_socket(ax25);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
out:
release_sock(sk);
@@ -1122,7 +1126,7 @@ static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
/* deal with restarts */
if (sock->state == SS_CONNECTING) {
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_SYN_SENT: /* still trying */
err = -EINPROGRESS;
goto out;
@@ -1138,12 +1142,12 @@ static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
}
}
- if (sk->state == TCP_ESTABLISHED && sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out;
}
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (ax25->digipeat != NULL) {
@@ -1188,7 +1192,7 @@ static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
* the socket is already bound, check to see if the device has
* been filled in, error if it hasn't.
*/
- if (sk->zapped) {
+ if (sk->sk_zapped) {
/* check if we can remove this feature. It is broken. */
printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n",
current->comm);
@@ -1204,7 +1208,7 @@ static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
}
}
- if (sk->type == SOCK_SEQPACKET &&
+ if (sk->sk_type == SOCK_SEQPACKET &&
ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
ax25->ax25_dev->dev)) {
if (digi != NULL)
@@ -1217,15 +1221,15 @@ static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
ax25->digipeat = digi;
/* First the easy one */
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
goto out;
}
/* Move to connecting socket, ax.25 lapb WAIT_UA.. */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
@@ -1250,18 +1254,18 @@ static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
ax25_start_heartbeat(ax25);
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out;
}
- if (sk->state == TCP_SYN_SENT) {
+ if (sk->sk_state == TCP_SYN_SENT) {
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- if (sk->state != TCP_SYN_SENT)
+ if (sk->sk_state != TCP_SYN_SENT)
break;
set_current_state(TASK_INTERRUPTIBLE);
release_sock(sk);
@@ -1273,10 +1277,10 @@ static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
}
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
/* Not in ABM, not in WAIT_UA -> failed */
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
@@ -1308,12 +1312,12 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
return -EINVAL;
lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
@@ -1322,9 +1326,9 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
 * The read queue this time holds sockets that are ready to use,
 * hooked onto the SABM we saved
*/
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -1340,16 +1344,16 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
- newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk = skb->sk;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
newsock->state = SS_CONNECTED;
@@ -1372,7 +1376,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
ax25 = ax25_sk(sk);
if (peer != 0) {
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
@@ -1426,12 +1430,12 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
ax25 = ax25_sk(sk);
- if (sk->zapped) {
+ if (sk->sk_zapped) {
err = -EADDRNOTAVAIL;
goto out;
}
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
@@ -1486,7 +1490,8 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
}
sax = *usax;
- if (sk->type == SOCK_SEQPACKET && ax25cmp(&ax25->dest_addr, &sax.sax25_call) != 0) {
+ if (sk->sk_type == SOCK_SEQPACKET &&
+ ax25cmp(&ax25->dest_addr, &sax.sax25_call)) {
err = -EISCONN;
goto out;
}
@@ -1500,7 +1505,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 * it has become closed (it did not start closed) and is a VC,
 * we ought to SIGPIPE, EPIPE
*/
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
@@ -1532,14 +1537,14 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Add the PID if one is not supplied by the user in the skb */
if (!ax25->pidincl) {
asmptr = skb_push(skb, 1);
- *asmptr = sk->protocol;
+ *asmptr = sk->sk_protocol;
}
SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n");
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
/* Connected mode sockets go via the LAPB machine */
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
@@ -1598,7 +1603,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
- if (sk->type == SOCK_SEQPACKET && sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
@@ -1670,7 +1675,7 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
res = put_user(amount, (int *)arg);
@@ -1681,7 +1686,7 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
res = put_user(amount, (int *)arg);
break;
@@ -1689,11 +1694,12 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0) {
+ if (!sk->sk_stamp.tv_sec) {
res = -ENOENT;
break;
}
- res = copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ res = copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)) ? -EFAULT : 0;
break;
}
res = -EINVAL;
@@ -1764,8 +1770,8 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
ax25_info.n2count = ax25->n2count;
ax25_info.state = ax25->state;
- ax25_info.rcv_q = atomic_read(&sk->rmem_alloc);
- ax25_info.snd_q = atomic_read(&sk->wmem_alloc);
+ ax25_info.rcv_q = atomic_read(&sk->sk_rmem_alloc);
+ ax25_info.snd_q = atomic_read(&sk->sk_wmem_alloc);
ax25_info.vs = ax25->vs;
ax25_info.vr = ax25->vr;
ax25_info.va = ax25->va;
@@ -1878,9 +1884,9 @@ static int ax25_get_info(char *buffer, char **start, off_t offset, int length)
if (ax25->sk != NULL) {
len += sprintf(buffer + len, " %d %d %ld\n",
- atomic_read(&ax25->sk->wmem_alloc),
- atomic_read(&ax25->sk->rmem_alloc),
- ax25->sk->socket != NULL ? SOCK_INODE(ax25->sk->socket)->i_ino : 0L);
+ atomic_read(&ax25->sk->sk_wmem_alloc),
+ atomic_read(&ax25->sk->sk_rmem_alloc),
+ ax25->sk->sk_socket != NULL ? SOCK_INODE(ax25->sk->sk_socket)->i_ino : 0L);
} else {
len += sprintf(buffer + len, " * * *\n");
}
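The af_ax25.c hunks above are the template for the whole patch: every generic member of struct sock picks up an sk_ prefix (state, sleep, type, zapped, sndbuf, and friends), while protocol-private state stays in ax25_cb. As a minimal sketch, assuming the 2.5-era APIs shown above, the TIOCOUTQ computation reads like this after the rename; ax25_outq_space() is a hypothetical helper name, not part of the patch:

/* Sketch: free send-buffer space, as computed in the TIOCOUTQ hunk
 * above, written against the renamed sk_ fields. Helper name is
 * illustrative only. */
static long ax25_outq_space(struct sock *sk)
{
        long amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);

        return amount < 0 ? 0 : amount;
}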
diff --git a/net/ax25/ax25_ds_in.c b/net/ax25/ax25_ds_in.c
index cee3451aee2a..799fb20b531f 100644
--- a/net/ax25/ax25_ds_in.c
+++ b/net/ax25/ax25_ds_in.c
@@ -65,13 +65,13 @@ static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int framet
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_ESTABLISHED;
+ ax25->sk->sk_state = TCP_ESTABLISHED;
/*
* For WAIT_SABM connections we will produce an accept
* ready socket here
*/
if (!sock_flag(ax25->sk, SOCK_DEAD))
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
}
ax25_dama_on(ax25);
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 8e06ca149b4d..5ac1f62cba01 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -104,7 +104,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
/* Magic here: If we listen() and a new link dies before it
 is accepted(), it isn't 'dead', so it doesn't get removed. */
if (!ax25->sk || sock_flag(ax25->sk, SOCK_DESTROY) ||
- (ax25->sk->state == TCP_LISTEN &&
+ (ax25->sk->sk_state == TCP_LISTEN &&
sock_flag(ax25->sk, SOCK_DEAD))) {
ax25_destroy_socket(ax25);
return;
@@ -116,7 +116,8 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
* Check the state of the receive buffer.
*/
if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->rmem_alloc) < (ax25->sk->rcvbuf / 2) &&
+ if (atomic_read(&ax25->sk->sk_rmem_alloc) <
+ (ax25->sk->sk_rcvbuf / 2) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
@@ -156,11 +157,11 @@ void ax25_ds_idletimer_expiry(ax25_cb *ax25)
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_CLOSE;
- ax25->sk->err = 0;
- ax25->sk->shutdown |= SEND_SHUTDOWN;
+ ax25->sk->sk_state = TCP_CLOSE;
+ ax25->sk->sk_err = 0;
+ ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
}
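The idle-timer hunk above ends with a close-and-notify sequence that recurs verbatim in ax25_std_timer.c and ax25_disconnect() below. A sketch of the shared shape under the renamed fields; ax25_sock_mark_closed() is an invented name, the patch keeps the code open-coded at each site:

/* Sketch: the repeated "mark closed, notify once" sequence. The
 * SOCK_DEAD test guarantees sk_state_change() fires at most once. */
static void ax25_sock_mark_closed(struct sock *sk, int err)
{
        sk->sk_state     = TCP_CLOSE;
        sk->sk_err       = err;
        sk->sk_shutdown |= SEND_SHUTDOWN;
        if (!sock_flag(sk, SOCK_DEAD)) {
                sk->sk_state_change(sk);        /* wake any sleepers */
                sock_set_flag(sk, SOCK_DEAD);
        }
}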
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index d36280ac5c78..593d9dbb080b 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -147,7 +147,8 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
}
if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
- if ((!ax25->pidincl && ax25->sk->protocol == pid) || ax25->pidincl) {
+ if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
+ ax25->pidincl) {
if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
queued = 1;
else
@@ -277,7 +278,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
/* Now find a suitable dgram socket */
sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
if (sk != NULL) {
- if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ sk->sk_rcvbuf) {
kfree_skb(skb);
} else {
/*
@@ -355,7 +357,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
if (sk != NULL) {
- if (sk->ack_backlog == sk->max_ack_backlog ||
+ if (sk->sk_ack_backlog == sk->sk_max_ack_backlog ||
(make = ax25_make_new(sk, ax25_dev)) == NULL) {
if (mine)
ax25_return_dm(dev, &src, &dest, &dp);
@@ -366,12 +368,12 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
ax25 = ax25_sk(make);
skb_set_owner_r(skb, make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
- make->state = TCP_ESTABLISHED;
- make->pair = sk;
+ make->sk_state = TCP_ESTABLISHED;
+ make->sk_pair = sk;
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
} else {
if (!mine) {
kfree_skb(skb);
@@ -435,7 +437,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
if (sk) {
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
} else
kfree_skb(skb);
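The receive-path hunks above make the admission decision explicit: a datagram is dropped once the socket's accounted receive memory reaches sk_rcvbuf. A minimal sketch of that check, assuming the 2.5-era sock API; the wrapper function is hypothetical:

/* Sketch: the SOCK_DGRAM admission check from ax25_rcv() above. */
static void ax25_queue_or_drop(struct sock *sk, struct sk_buff *skb)
{
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                kfree_skb(skb);                 /* receiver full: drop */
        else if (sock_queue_rcv_skb(sk, skb) != 0)
                kfree_skb(skb);                 /* charge failed: drop */
}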
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index c30f0875c6df..0d3bbcf07fc0 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -435,7 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
}
if (ax25->sk != NULL)
- ax25->sk->zapped = 0;
+ ax25->sk->sk_zapped = 0;
put:
ax25_put_route(ax25_rt);
diff --git a/net/ax25/ax25_std_in.c b/net/ax25/ax25_std_in.c
index 7abbee89d093..ac30541d95a4 100644
--- a/net/ax25/ax25_std_in.c
+++ b/net/ax25/ax25_std_in.c
@@ -73,10 +73,10 @@ static int ax25_std_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frame
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_ESTABLISHED;
+ ax25->sk->sk_state = TCP_ESTABLISHED;
/* For WAIT_SABM connections we will produce an accept ready socket here */
if (!sock_flag(ax25->sk, SOCK_DEAD))
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
}
}
break;
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 8f82c2169fb7..efcfe1d27525 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -38,7 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
/* Magic here: If we listen() and a new link dies before it
 is accepted(), it isn't 'dead', so it doesn't get removed. */
if (!ax25->sk || sock_flag(ax25->sk, SOCK_DESTROY) ||
- (ax25->sk->state == TCP_LISTEN &&
+ (ax25->sk->sk_state == TCP_LISTEN &&
sock_flag(ax25->sk, SOCK_DEAD))) {
ax25_destroy_socket(ax25);
return;
@@ -51,7 +51,8 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
* Check the state of the receive buffer.
*/
if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->rmem_alloc) < (ax25->sk->rcvbuf / 2) &&
+ if (atomic_read(&ax25->sk->sk_rmem_alloc) <
+ (ax25->sk->sk_rcvbuf / 2) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
@@ -93,11 +94,11 @@ void ax25_std_idletimer_expiry(ax25_cb *ax25)
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_CLOSE;
- ax25->sk->err = 0;
- ax25->sk->shutdown |= SEND_SHUTDOWN;
+ ax25->sk->sk_state = TCP_CLOSE;
+ ax25->sk->sk_err = 0;
+ ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
}
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 3a5c3f9f4e86..654307fbb8d0 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -282,11 +282,11 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
ax25_link_failed(ax25, reason);
if (ax25->sk != NULL) {
- ax25->sk->state = TCP_CLOSE;
- ax25->sk->err = reason;
- ax25->sk->shutdown |= SEND_SHUTDOWN;
+ ax25->sk->sk_state = TCP_CLOSE;
+ ax25->sk->sk_err = reason;
+ ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
- ax25->sk->state_change(ax25->sk);
+ ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 495967f8a844..6d67ccf1e7c1 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -126,15 +126,15 @@ struct sock *bt_sock_alloc(struct socket *sock, int proto, int pi_size, int prio
return NULL;
}
memset(pi, 0, pi_size);
- sk->protinfo = pi;
+ sk->sk_protinfo = pi;
}
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
- sk->zapped = 0;
- sk->protocol = proto;
- sk->state = BT_OPEN;
+ sk->sk_zapped = 0;
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
return sk;
}
@@ -142,7 +142,7 @@ struct sock *bt_sock_alloc(struct socket *sock, int proto, int pi_size, int prio
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
- sk->next = l->head;
+ sk->sk_next = l->head;
l->head = sk;
sock_hold(sk);
write_unlock_bh(&l->lock);
@@ -153,9 +153,9 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
struct sock **skp;
write_lock_bh(&l->lock);
- for (skp = &l->head; *skp; skp = &((*skp)->next)) {
+ for (skp = &l->head; *skp; skp = &((*skp)->sk_next)) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -170,15 +170,15 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk)
sock_hold(sk);
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
bt_sk(sk)->parent = parent;
- parent->ack_backlog++;
+ parent->sk_ack_backlog++;
}
static void bt_accept_unlink(struct sock *sk)
{
- BT_DBG("sk %p state %d", sk, sk->state);
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
list_del_init(&bt_sk(sk)->accept_q);
- bt_sk(sk)->parent->ack_backlog--;
+ bt_sk(sk)->parent->sk_ack_backlog--;
bt_sk(sk)->parent = NULL;
sock_put(sk);
}
@@ -194,13 +194,13 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
lock_sock(sk);
- if (sk->state == BT_CLOSED) {
+ if (sk->sk_state == BT_CLOSED) {
release_sock(sk);
bt_accept_unlink(sk);
continue;
}
- if (sk->state == BT_CONNECTED || !newsock) {
+ if (sk->sk_state == BT_CONNECTED || !newsock) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@@ -226,7 +226,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return -EOPNOTSUPP;
if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
@@ -254,30 +254,30 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
BT_DBG("sock %p, sk %p", sock, sk);
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
- if (sk->err || !skb_queue_empty(&sk->error_queue))
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
- if (!skb_queue_empty(&sk->receive_queue) ||
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
!list_empty(&bt_sk(sk)->accept_q) ||
- (sk->shutdown & RCV_SHUTDOWN))
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
- if (sk->state == BT_CLOSED)
+ if (sk->sk_state == BT_CLOSED)
mask |= POLLHUP;
- if (sk->state == BT_CONNECT || sk->state == BT_CONNECT2)
+ if (sk->sk_state == BT_CONNECT || sk->sk_state == BT_CONNECT2)
return mask;
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
return mask;
}
@@ -290,8 +290,8 @@ int bt_sock_w4_connect(struct sock *sk, int flags)
BT_DBG("sk %p", sk);
- add_wait_queue(sk->sleep, &wait);
- while (sk->state != BT_CONNECTED) {
+ add_wait_queue(sk->sk_sleep, &wait);
+ while (sk->sk_state != BT_CONNECTED) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
@@ -303,10 +303,10 @@ int bt_sock_w4_connect(struct sock *sk, int flags)
lock_sock(sk);
err = 0;
- if (sk->state == BT_CONNECTED)
+ if (sk->sk_state == BT_CONNECTED)
break;
- if (sk->err) {
+ if (sk->sk_err) {
err = sock_error(sk);
break;
}
@@ -317,7 +317,7 @@ int bt_sock_w4_connect(struct sock *sk, int flags)
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return err;
}
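bt_sock_link()/bt_sock_unlink() above keep each Bluetooth protocol's sockets on a singly linked list threaded through the renamed sk_next pointer. The unlink walks pointers-to-pointers, so removing the head needs no special case; a standalone sketch with locking elided:

/* Sketch of the removal used by bt_sock_unlink() above: advance a
 * pointer-to-pointer so head and interior nodes unlink identically. */
static void bt_list_remove(struct sock **head, struct sock *sk)
{
        struct sock **skp;

        for (skp = head; *skp; skp = &(*skp)->sk_next) {
                if (*skp == sk) {
                        *skp = sk->sk_next;     /* splice out */
                        break;
                }
        }
}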
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index c2e49a273b4b..864e4e30d96c 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -465,21 +465,21 @@ static int bnep_session(void *arg)
set_fs(KERNEL_DS);
init_waitqueue_entry(&wait, current);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&s->killed)) {
set_current_state(TASK_INTERRUPTIBLE);
// RX
- while ((skb = skb_dequeue(&sk->receive_queue))) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
bnep_rx_frame(s, skb);
}
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
break;
// TX
- while ((skb = skb_dequeue(&sk->write_queue)))
+ while ((skb = skb_dequeue(&sk->sk_write_queue)))
if (bnep_tx_frame(s, skb))
break;
netif_wake_queue(dev);
@@ -487,7 +487,7 @@ static int bnep_session(void *arg)
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
/* Cleanup session */
down_write(&bnep_session_sem);
@@ -609,11 +609,11 @@ int bnep_del_connection(struct bnep_conndel_req *req)
if (s) {
 /* Wake up user-space, which is polling for socket errors.
 * This is a temporary hack until we have shutdown in L2CAP */
- s->sock->sk->err = EUNATCH;
+ s->sock->sk->sk_err = EUNATCH;
/* Kill session thread */
atomic_inc(&s->killed);
- wake_up_interruptible(s->sock->sk->sleep);
+ wake_up_interruptible(s->sock->sk->sk_sleep);
} else
err = -ENOENT;
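bnep_session() above is a kernel thread multiplexing both directions over one socket: it parks on the renamed sk_sleep wait queue, drains sk_receive_queue inbound and sk_write_queue outbound, and sleeps between passes. The control-flow skeleton, as a sketch only (stop_requested stands in for atomic_read(&s->killed)):

/* Sketch: the session-thread wait loop after the rename. */
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
while (!stop_requested) {
        set_current_state(TASK_INTERRUPTIBLE);

        while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
                skb_orphan(skb);                /* drop socket charge */
                bnep_rx_frame(s, skb);          /* deliver to netdev */
        }

        while ((skb = skb_dequeue(&sk->sk_write_queue)))
                if (bnep_tx_frame(s, skb))      /* push to L2CAP */
                        break;

        schedule();                             /* sleep until woken */
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);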
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 71217edf3566..6d4f51b6643d 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -121,8 +121,8 @@ static void bnep_net_set_mc_list(struct net_device *dev)
r->len = htons(skb->len - len);
}
- skb_queue_tail(&sk->write_queue, skb);
- wake_up_interruptible(sk->sleep);
+ skb_queue_tail(&sk->sk_write_queue, skb);
+ wake_up_interruptible(sk->sk_sleep);
#endif
}
@@ -209,13 +209,13 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* We cannot send L2CAP packets from here as we are potentially in a bh.
 * So we have to queue them and wake up the session thread, which is sleeping
- * on the sk->sleep.
+ * on the sk->sk_sleep.
*/
dev->trans_start = jiffies;
- skb_queue_tail(&sk->write_queue, skb);
- wake_up_interruptible(sk->sleep);
+ skb_queue_tail(&sk->sk_write_queue, skb);
+ wake_up_interruptible(sk->sk_sleep);
- if (skb_queue_len(&sk->write_queue) >= BNEP_TX_QUEUE_LEN) {
+ if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
BT_DBG("tx queue is full");
/* Stop queuing.
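The comment in bnep_net_xmit() above states the constraint: hard_start_xmit can run in softirq context, where sending on an L2CAP socket directly is not allowed, so the frame is queued and the session thread is woken instead. A sketch of the hand-off; the netif_stop_queue() call is an assumption about the truncated tail of the hunk, consistent with the netif_wake_queue() visible in bnep_session():

/* Sketch: deferred transmit from softirq context. */
dev->trans_start = jiffies;
skb_queue_tail(&sk->sk_write_queue, skb);       /* hand off the frame */
wake_up_interruptible(sk->sk_sleep);            /* kick bnep_session() */

if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN)
        netif_stop_queue(dev);                  /* assumed: backpressure
                                                 * until the session
                                                 * thread drains */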
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 609fe3da8d4d..bcef0a0c48a3 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -93,7 +93,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
if (!nsock)
return err;
- if (nsock->sk->state != BT_CONNECTED)
+ if (nsock->sk->sk_state != BT_CONNECTED)
return -EBADFD;
err = bnep_add_connection(&ca, nsock);
@@ -179,8 +179,8 @@ static int bnep_sock_create(struct socket *sock, int protocol)
sock->state = SS_UNCONNECTED;
- sk->destruct = NULL;
- sk->protocol = protocol;
+ sk->sk_destruct = NULL;
+ sk->sk_protocol = protocol;
return 0;
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 61429363982c..be6bf7362686 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -95,11 +95,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
- for (sk = hci_sk_list.head; sk; sk = sk->next) {
+ for (sk = hci_sk_list.head; sk; sk = sk->sk_next) {
struct hci_filter *flt;
struct sk_buff *nskb;
- if (sk->state != BT_BOUND || hci_pi(sk)->hdev != hdev)
+ if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
continue;
/* Don't send frame to the socket it came from */
@@ -157,8 +157,8 @@ static int hci_sock_release(struct socket *sock)
sock_orphan(sk);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
sock_put(sk);
return 0;
@@ -283,7 +283,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
}
hci_pi(sk)->hdev = hdev;
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -330,7 +330,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msgh
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- if (sk->state == BT_CLOSED)
+ if (sk->sk_state == BT_CLOSED)
return 0;
if (!(skb = skb_recv_datagram(sk, flags, noblock, &err)))
@@ -587,7 +587,7 @@ static int hci_sock_create(struct socket *sock, int protocol)
return -ENOMEM;
sock->state = SS_UNCONNECTED;
- sk->state = BT_OPEN;
+ sk->sk_state = BT_OPEN;
bt_sock_link(&hci_sk_list, sk);
return 0;
@@ -610,13 +610,13 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
- for (sk = hci_sk_list.head; sk; sk = sk->next) {
+ for (sk = hci_sk_list.head; sk; sk = sk->sk_next) {
bh_lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
- sk->err = EPIPE;
- sk->state = BT_OPEN;
- sk->state_change(sk);
+ sk->sk_err = EPIPE;
+ sk->sk_state = BT_OPEN;
+ sk->sk_state_change(sk);
hci_dev_put(hdev);
}
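The device-removal hunk above follows a deliberate order: record the error in sk_err, reset sk_state, and only then call sk_state_change(), so any sleeper woken by the callback already observes the failure. Reduced to a sketch:

/* Sketch: detaching one socket in hci_sock_dev_event() above. */
bh_lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
        hci_pi(sk)->hdev = NULL;
        sk->sk_err   = EPIPE;           /* poll() reports POLLERR */
        sk->sk_state = BT_OPEN;         /* back to unbound */
        sk->sk_state_change(sk);        /* wake after error is visible */
        hci_dev_put(hdev);
}
bh_unlock_sock(sk);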
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 84e58069d90f..4d88cab7e410 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -86,7 +86,7 @@ static void l2cap_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
__l2cap_sock_close(sk, ETIMEDOUT);
@@ -98,25 +98,25 @@ static void l2cap_sock_timeout(unsigned long arg)
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
- BT_DBG("sk %p state %d timeout %ld", sk, sk->state, timeout);
+ BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
- if (!mod_timer(&sk->timer, jiffies + timeout))
+ if (!mod_timer(&sk->sk_timer, jiffies + timeout))
sock_hold(sk);
}
static void l2cap_sock_clear_timer(struct sock *sk)
{
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
- if (timer_pending(&sk->timer) && del_timer(&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
__sock_put(sk);
}
static void l2cap_sock_init_timer(struct sock *sk)
{
- init_timer(&sk->timer);
- sk->timer.function = l2cap_sock_timeout;
- sk->timer.data = (unsigned long)sk;
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.function = l2cap_sock_timeout;
+ sk->sk_timer.data = (unsigned long)sk;
}
/* ---- L2CAP connections ---- */
@@ -186,7 +186,7 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
{
struct sock *sk;
- for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next) {
if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
break;
}
@@ -200,8 +200,8 @@ static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
{
struct sock *sk, *sk1 = NULL;
- for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
- if (state && sk->state != state)
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next) {
+ if (state && sk->sk_state != state)
continue;
if (l2cap_pi(sk)->psm == psm) {
@@ -233,11 +233,11 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
static void l2cap_sock_cleanup_listen(struct sock *parent)
@@ -250,8 +250,8 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
while ((sk = bt_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);
- parent->state = BT_CLOSED;
- parent->zapped = 1;
+ parent->sk_state = BT_CLOSED;
+ parent->sk_zapped = 1;
}
/* Kill socket (only if zapped and orphan)
@@ -259,10 +259,10 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
*/
static void l2cap_sock_kill(struct sock *sk)
{
- if (!sk->zapped || sk->socket)
+ if (!sk->sk_zapped || sk->sk_socket)
return;
- BT_DBG("sk %p state %d", sk, sk->state);
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&l2cap_sk_list, sk);
@@ -272,9 +272,9 @@ static void l2cap_sock_kill(struct sock *sk)
static void __l2cap_sock_close(struct sock *sk, int reason)
{
- BT_DBG("sk %p state %d socket %p", sk, sk->state, sk->socket);
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
- switch (sk->state) {
+ switch (sk->sk_state) {
case BT_LISTEN:
l2cap_sock_cleanup_listen(sk);
break;
@@ -282,11 +282,11 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
case BT_CONNECTED:
case BT_CONFIG:
case BT_CONNECT2:
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_disconn_req req;
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ * 5);
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
@@ -303,7 +303,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
break;
default:
- sk->zapped = 1;
+ sk->sk_zapped = 1;
break;
}
}
@@ -327,7 +327,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
BT_DBG("sk %p", sk);
if (parent) {
- sk->type = parent->type;
+ sk->sk_type = parent->sk_type;
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
pi->link_mode = l2cap_pi(parent)->link_mode;
@@ -352,11 +352,11 @@ static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = l2cap_sock_destruct;
- sk->sndtimeo = L2CAP_CONN_TIMEOUT;
+ sk->sk_destruct = l2cap_sock_destruct;
+ sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
- sk->protocol = proto;
- sk->state = BT_OPEN;
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
l2cap_sock_init_timer(sk);
@@ -401,7 +401,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
lock_sock(sk);
- if (sk->state != BT_OPEN) {
+ if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
@@ -414,7 +414,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
l2cap_pi(sk)->sport = la->l2_psm;
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
}
write_unlock_bh(&l2cap_sk_list.lock);
@@ -458,18 +458,18 @@ static int l2cap_do_connect(struct sock *sk)
l2cap_chan_add(conn, sk, NULL);
- sk->state = BT_CONNECT;
- l2cap_sock_set_timer(sk, sk->sndtimeo);
+ sk->sk_state = BT_CONNECT;
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
if (hcon->state == BT_CONNECTED) {
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn_req req;
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
} else {
l2cap_sock_clear_timer(sk);
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
}
}
@@ -494,12 +494,12 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
goto done;
}
- if (sk->type == SOCK_SEQPACKET && !la->l2_psm) {
+ if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
err = -EINVAL;
goto done;
}
- switch(sk->state) {
+ switch(sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
@@ -544,7 +544,7 @@ int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if (sk->state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
@@ -554,9 +554,9 @@ int l2cap_sock_listen(struct socket *sock, int backlog)
goto done;
}
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = BT_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
@@ -572,7 +572,7 @@ int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
@@ -582,7 +582,7 @@ int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -594,7 +594,7 @@ int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
timeo = schedule_timeout(timeo);
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
@@ -605,7 +605,7 @@ int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
@@ -648,7 +648,7 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
/* First fragment (with L2CAP header) */
- if (sk->type == SOCK_DGRAM)
+ if (sk->sk_type == SOCK_DGRAM)
hlen = L2CAP_HDR_SIZE + 2;
else
hlen = L2CAP_HDR_SIZE;
@@ -665,7 +665,7 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- if (sk->type == SOCK_DGRAM)
+ if (sk->sk_type == SOCK_DGRAM)
put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
@@ -713,7 +713,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
BT_DBG("sock %p, sk %p", sock, sk);
- if (sk->err)
+ if (sk->sk_err)
return sock_error(sk);
if (msg->msg_flags & MSG_OOB)
@@ -725,7 +725,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
lock_sock(sk);
- if (sk->state == BT_CONNECTED)
+ if (sk->sk_state == BT_CONNECTED)
err = l2cap_do_send(sk, msg, len);
else
err = -ENOTCONN;
@@ -804,7 +804,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
case L2CAP_CONNINFO:
- if (sk->state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
@@ -837,7 +837,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
l2cap_sock_clear_timer(sk);
lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
__l2cap_sock_close(sk, ECONNRESET);
release_sock(sk);
@@ -939,10 +939,10 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
l2cap_pi(sk)->conn = conn;
- if (sk->type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
- } else if (sk->type == SOCK_DGRAM) {
+ } else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = 0x0002;
l2cap_pi(sk)->dcid = 0x0002;
@@ -978,14 +978,14 @@ static void l2cap_chan_del(struct sock *sk, int err)
hci_conn_put(conn->hcon);
}
- sk->state = BT_CLOSED;
- sk->err = err;
- sk->zapped = 1;
+ sk->sk_state = BT_CLOSED;
+ sk->sk_err = err;
+ sk->sk_zapped = 1;
if (parent)
- parent->data_ready(parent, 0);
+ parent->sk_data_ready(parent, 0);
else
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -1000,11 +1000,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
- sk->state = BT_CONNECTED;
- sk->state_change(sk);
- } else if (sk->state == BT_CONNECT) {
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ } else if (sk->sk_state == BT_CONNECT) {
struct l2cap_conn_req req;
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
@@ -1030,13 +1030,13 @@ static void l2cap_chan_ready(struct sock *sk)
/* Outgoing channel.
* Wake up socket sleeping on connect.
*/
- sk->state = BT_CONNECTED;
- sk->state_change(sk);
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
} else {
/* Incoming channel.
* Wake up socket sleeping on accept.
*/
- parent->data_ready(parent, 0);
+ parent->sk_data_ready(parent, 0);
}
}
@@ -1051,7 +1051,7 @@ void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- if (sk->type != SOCK_RAW)
+ if (sk->sk_type != SOCK_RAW)
continue;
/* Don't send frame to the socket it came from */
@@ -1352,8 +1352,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
- if (parent->ack_backlog > parent->max_ack_backlog) {
- BT_DBG("backlog full %d", parent->ack_backlog);
+ if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto response;
}
@@ -1366,7 +1366,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
write_unlock(&list->lock);
- sk->zapped = 1;
+ sk->sk_zapped = 1;
l2cap_sock_kill(sk);
goto response;
}
@@ -1382,12 +1382,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
__l2cap_chan_add(conn, sk, parent);
dcid = l2cap_pi(sk)->scid;
- l2cap_sock_set_timer(sk, sk->sndtimeo);
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Service level security */
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
- sk->state = BT_CONNECT2;
+ sk->sk_state = BT_CONNECT2;
l2cap_pi(sk)->ident = cmd->ident;
if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
@@ -1398,7 +1398,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
goto done;
}
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
result = status = 0;
done:
@@ -1435,7 +1435,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
switch (result) {
case L2CAP_CR_SUCCESS:
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
@@ -1488,7 +1488,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
} else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
u8 req[64];
@@ -1522,7 +1522,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
/* They didn't like our options. Well... we do not negotiate.
* Close channel.
*/
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ * 5);
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
@@ -1538,7 +1538,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
}
@@ -1566,7 +1566,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_chan_del(sk, ECONNRESET);
bh_unlock_sock(sk);
@@ -1690,7 +1690,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
BT_DBG("sk %p, len %d", sk, skb->len);
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
goto drop;
if (l2cap_pi(sk)->imtu < skb->len)
@@ -1722,7 +1722,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct
BT_DBG("sk %p, len %d", sk, skb->len);
- if (sk->state != BT_BOUND && sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
goto drop;
if (l2cap_pi(sk)->imtu < skb->len)
@@ -1781,8 +1781,8 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
/* Find listening sockets and check their link_mode */
read_lock(&l2cap_sk_list.lock);
- for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
- if (sk->state != BT_LISTEN)
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next) {
+ if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
@@ -1845,17 +1845,17 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->state != BT_CONNECT2 ||
+ if (sk->sk_state != BT_CONNECT2 ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
bh_unlock_sock(sk);
continue;
}
if (!status) {
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
result = 0;
} else {
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
}
@@ -1892,16 +1892,16 @@ static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (sk->state != BT_CONNECT2) {
+ if (sk->sk_state != BT_CONNECT2) {
bh_unlock_sock(sk);
continue;
}
if (!status) {
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
result = 0;
} else {
- sk->state = BT_DISCONN;
+ sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
}
@@ -2008,7 +2008,7 @@ static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
read_lock_bh(&l2cap_sk_list.lock);
- for (sk = l2cap_sk_list.head; sk; sk = sk->next)
+ for (sk = l2cap_sk_list.head; sk; sk = sk->sk_next)
if (!l--)
return sk;
return NULL;
@@ -2018,7 +2018,7 @@ static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
{
struct sock *sk = e;
(*pos)++;
- return sk->next;
+ return sk->sk_next;
}
static void l2cap_seq_stop(struct seq_file *seq, void *e)
@@ -2033,8 +2033,8 @@ static int l2cap_seq_show(struct seq_file *seq, void *e)
seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
- sk->state, pi->psm, pi->scid, pi->dcid, pi->imtu, pi->omtu,
- pi->link_mode);
+ sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
+ pi->omtu, pi->link_mode);
return 0;
}
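The l2cap timer helpers near the top of this file (and their twins in sco.c below) tie the lifetime of sk_timer to a socket reference, with the invariant that exactly one reference is held while the timer is pending. mod_timer() returns 0 only when it armed a previously inactive timer, and del_timer() returns nonzero only when it cancelled a pending one, so each path adjusts the refcount exactly once. A sketch, with xxx_ standing for the protocol prefix:

/* Sketch: one sock reference per pending sk_timer. */
static void xxx_sock_set_timer(struct sock *sk, long timeout)
{
        if (!mod_timer(&sk->sk_timer, jiffies + timeout))
                sock_hold(sk);          /* timer was idle: take a ref */
}

static void xxx_sock_clear_timer(struct sock *sk)
{
        if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
                __sock_put(sk);         /* cancelled while pending */
}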
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index c701753c8bcf..4b7c3315ff89 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -144,7 +144,7 @@ static inline int __check_fcs(u8 *data, int type, u8 fcs)
/* ---- L2CAP callbacks ---- */
static void rfcomm_l2state_change(struct sock *sk)
{
- BT_DBG("%p state %d", sk, sk->state);
+ BT_DBG("%p state %d", sk, sk->sk_state);
rfcomm_schedule(RFCOMM_SCHED_STATE);
}
@@ -163,8 +163,8 @@ static int rfcomm_l2sock_create(struct socket **sock)
err = sock_create(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
if (!err) {
struct sock *sk = (*sock)->sk;
- sk->data_ready = rfcomm_l2data_ready;
- sk->state_change = rfcomm_l2state_change;
+ sk->sk_data_ready = rfcomm_l2data_ready;
+ sk->sk_state_change = rfcomm_l2state_change;
}
return err;
}
@@ -1545,19 +1545,19 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
struct sock *sk = sock->sk;
struct sk_buff *skb;
- BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->receive_queue));
+ BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->sk_receive_queue));
/* Get data directly from socket receive queue without copying it. */
- while ((skb = skb_dequeue(&sk->receive_queue))) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
rfcomm_recv_frame(s, skb);
}
- if (sk->state == BT_CLOSED) {
+ if (sk->sk_state == BT_CLOSED) {
if (!s->initiator)
rfcomm_session_put(s);
- rfcomm_session_close(s, sk->err);
+ rfcomm_session_close(s, sk->sk_err);
}
}
@@ -1587,8 +1587,8 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
}
/* Set our callbacks */
- nsock->sk->data_ready = rfcomm_l2data_ready;
- nsock->sk->state_change = rfcomm_l2state_change;
+ nsock->sk->sk_data_ready = rfcomm_l2data_ready;
+ nsock->sk->sk_state_change = rfcomm_l2state_change;
s = rfcomm_session_add(nsock, BT_OPEN);
if (s)
@@ -1603,7 +1603,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
BT_DBG("%p state %ld", s, s->state);
- switch(sk->state) {
+ switch(sk->sk_state) {
case BT_CONNECTED:
s->state = BT_CONNECT;
@@ -1616,7 +1616,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
case BT_CLOSED:
s->state = BT_CLOSED;
- rfcomm_session_close(s, sk->err);
+ rfcomm_session_close(s, sk->sk_err);
break;
}
}
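rfcomm_l2sock_create() and rfcomm_accept_connection() above show the layering trick RFCOMM uses: it owns a plain L2CAP socket and redirects the renamed callback pointers at it, so L2CAP events feed the RFCOMM state machine instead of a user process. A sketch of the hook-up, error handling elided:

/* Sketch: ride an L2CAP socket by replacing its sk_ callbacks. */
err = sock_create(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
if (!err) {
        struct sock *sk = (*sock)->sk;

        sk->sk_data_ready   = rfcomm_l2data_ready;      /* rx events */
        sk->sk_state_change = rfcomm_l2state_change;    /* state events */
}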
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1bc13b7a57e3..fba45baff519 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -78,11 +78,11 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
if (!sk)
return;
- atomic_add(skb->len, &sk->rmem_alloc);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, skb->len);
+ atomic_add(skb->len, &sk->sk_rmem_alloc);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
- if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
}
@@ -97,16 +97,16 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
bh_lock_sock(sk);
if (err)
- sk->err = err;
- sk->state = d->state;
+ sk->sk_err = err;
+ sk->sk_state = d->state;
parent = bt_sk(sk)->parent;
if (!parent) {
if (d->state == BT_CONNECTED)
rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
- sk->state_change(sk);
+ sk->sk_state_change(sk);
} else
- parent->data_ready(parent, 0);
+ parent->sk_data_ready(parent, 0);
bh_unlock_sock(sk);
}
@@ -116,7 +116,7 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
{
struct sock *sk;
- for (sk = rfcomm_sk_list.head; sk; sk = sk->next) {
+ for (sk = rfcomm_sk_list.head; sk; sk = sk->sk_next) {
if (rfcomm_pi(sk)->channel == channel &&
!bacmp(&bt_sk(sk)->src, src))
break;
@@ -132,8 +132,8 @@ static struct sock *__rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t
{
struct sock *sk, *sk1 = NULL;
- for (sk = rfcomm_sk_list.head; sk; sk = sk->next) {
- if (state && sk->state != state)
+ for (sk = rfcomm_sk_list.head; sk; sk = sk->sk_next) {
+ if (state && sk->sk_state != state)
continue;
if (rfcomm_pi(sk)->channel == channel) {
@@ -167,8 +167,8 @@ static void rfcomm_sock_destruct(struct sock *sk)
BT_DBG("sk %p dlc %p", sk, d);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
rfcomm_dlc_lock(d);
rfcomm_pi(sk)->dlc = NULL;
@@ -180,8 +180,8 @@ static void rfcomm_sock_destruct(struct sock *sk)
rfcomm_dlc_put(d);
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
static void rfcomm_sock_cleanup_listen(struct sock *parent)
@@ -194,8 +194,8 @@ static void rfcomm_sock_cleanup_listen(struct sock *parent)
while ((sk = bt_accept_dequeue(parent, NULL)))
rfcomm_sock_close(sk);
- parent->state = BT_CLOSED;
- parent->zapped = 1;
+ parent->sk_state = BT_CLOSED;
+ parent->sk_zapped = 1;
}
/* Kill socket (only if zapped and orphan)
@@ -203,10 +203,10 @@ static void rfcomm_sock_cleanup_listen(struct sock *parent)
*/
static void rfcomm_sock_kill(struct sock *sk)
{
- if (!sk->zapped || sk->socket)
+ if (!sk->sk_zapped || sk->sk_socket)
return;
- BT_DBG("sk %p state %d refcnt %d", sk, sk->state, atomic_read(&sk->refcnt));
+ BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
@@ -223,9 +223,9 @@ static void rfcomm_sock_close(struct sock *sk)
lock_sock(sk);
- BT_DBG("sk %p state %d socket %p", sk, sk->state, sk->socket);
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
- switch (sk->state) {
+ switch (sk->sk_state) {
case BT_LISTEN:
rfcomm_sock_cleanup_listen(sk);
break;
@@ -237,7 +237,7 @@ static void rfcomm_sock_close(struct sock *sk)
rfcomm_dlc_close(d, 0);
default:
- sk->zapped = 1;
+ sk->sk_zapped = 1;
break;
};
@@ -251,7 +251,7 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
BT_DBG("sk %p", sk);
if (parent)
- sk->type = parent->type;
+ sk->sk_type = parent->sk_type;
}
static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, int prio)
@@ -276,14 +276,14 @@ static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, int prio)
rfcomm_pi(sk)->dlc = d;
d->owner = sk;
- sk->destruct = rfcomm_sock_destruct;
- sk->sndtimeo = RFCOMM_CONN_TIMEOUT;
+ sk->sk_destruct = rfcomm_sock_destruct;
+ sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;
- sk->sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
- sk->rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
+ sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
+ sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
- sk->protocol = proto;
- sk->state = BT_OPEN;
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
bt_sock_link(&rfcomm_sk_list, sk);
@@ -324,7 +324,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
lock_sock(sk);
- if (sk->state != BT_OPEN) {
+ if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
@@ -337,7 +337,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
}
write_unlock_bh(&rfcomm_sk_list.lock);
@@ -359,15 +359,15 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc))
return -EINVAL;
- if (sk->state != BT_OPEN && sk->state != BT_BOUND)
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
return -EINVAL;
lock_sock(sk);
- sk->state = BT_CONNECT;
+ sk->sk_state = BT_CONNECT;
bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
@@ -388,14 +388,14 @@ int rfcomm_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if (sk->state != BT_BOUND) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = BT_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
@@ -411,7 +411,7 @@ int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
@@ -421,7 +421,7 @@ int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -433,7 +433,7 @@ int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
timeo = schedule_timeout(timeo);
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
@@ -444,7 +444,7 @@ int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
@@ -488,7 +488,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- if (sk->shutdown & SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -530,23 +530,23 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
{
DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (skb_queue_len(&sk->receive_queue) || sk->err || (sk->shutdown & RCV_SHUTDOWN) ||
+ if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) || !timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return timeo;
}
@@ -557,7 +557,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
int target, err = 0, copied = 0;
long timeo;
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
return -EINVAL;
if (flags & MSG_OOB)
@@ -576,14 +576,14 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int chunk;
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (!skb) {
if (copied >= target)
break;
if ((err = sock_error(sk)) != 0)
break;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
err = -EAGAIN;
@@ -601,7 +601,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
break;
@@ -610,24 +610,24 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
size -= chunk;
if (!(flags & MSG_PEEK)) {
- atomic_sub(chunk, &sk->rmem_alloc);
+ atomic_sub(chunk, &sk->sk_rmem_alloc);
skb_pull(skb, chunk);
if (skb->len) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
kfree_skb(skb);
} else {
/* put message back and return */
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
out:
- if (atomic_read(&sk->rmem_alloc) <= (sk->rcvbuf >> 2))
+ if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
@@ -643,8 +643,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
if (!sk) return 0;
lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
- if (sk->state == BT_CONNECTED)
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ if (sk->sk_state == BT_CONNECTED)
rfcomm_dlc_close(rfcomm_pi(sk)->dlc, 0);
release_sock(sk);
@@ -744,8 +744,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
return 0;
/* Check for backlog size */
- if (parent->ack_backlog > parent->max_ack_backlog) {
- BT_DBG("backlog full %d", parent->ack_backlog);
+ if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto done;
}
@@ -758,7 +758,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
bacpy(&bt_sk(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
- sk->state = BT_CONFIG;
+ sk->sk_state = BT_CONFIG;
bt_accept_enqueue(parent, sk);
/* Accept connection and return socket DLC */
@@ -779,7 +779,7 @@ static void *rfcomm_seq_start(struct seq_file *seq, loff_t *pos)
read_lock_bh(&rfcomm_sk_list.lock);
- for (sk = rfcomm_sk_list.head; sk; sk = sk->next)
+ for (sk = rfcomm_sk_list.head; sk; sk = sk->sk_next)
if (!l--)
return sk;
return NULL;
@@ -789,7 +789,7 @@ static void *rfcomm_seq_next(struct seq_file *seq, void *e, loff_t *pos)
{
struct sock *sk = e;
(*pos)++;
- return sk->next;
+ return sk->sk_next;
}
static void rfcomm_seq_stop(struct seq_file *seq, void *e)
@@ -802,7 +802,7 @@ static int rfcomm_seq_show(struct seq_file *seq, void *e)
struct sock *sk = e;
seq_printf(seq, "%s %s %d %d\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
- sk->state, rfcomm_pi(sk)->channel);
+ sk->sk_state, rfcomm_pi(sk)->channel);
return 0;
}
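The rfcomm/sock.c hunks above implement flow control with two watermarks on the renamed receive accounting: the DLC is throttled when sk_rmem_alloc reaches sk_rcvbuf, and unthrottled only after recvmsg() drains it below a quarter of that. The gap between the marks is hysteresis, so the DLC does not flap on every byte consumed. As a sketch:

/* Sketch: the two flow-control watermarks from the hunks above.
 * High mark, in rfcomm_sk_data_ready(): stop the remote sender. */
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
        rfcomm_dlc_throttle(d);

/* Low mark, in rfcomm_sock_recvmsg(): restart well below rcvbuf. */
if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
        rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);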
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 99e947050ea9..c6939f465545 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -294,7 +294,7 @@ static int rfcomm_create_dev(struct sock *sk, unsigned long arg)
if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
/* Socket must be connected */
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
return -EBADFD;
dlc = rfcomm_pi(sk)->dlc;
@@ -314,7 +314,7 @@ static int rfcomm_create_dev(struct sock *sk, unsigned long arg)
if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
/* DLC is now used by device.
* Socket must be disconnected */
- sk->state = BT_CLOSED;
+ sk->sk_state = BT_CLOSED;
}
return id;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index daf42f95c42c..cc37206d329b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -81,11 +81,11 @@ static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
- sk->err = ETIMEDOUT;
- sk->state_change(sk);
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
@@ -94,25 +94,25 @@ static void sco_sock_timeout(unsigned long arg)
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
- BT_DBG("sock %p state %d timeout %ld", sk, sk->state, timeout);
+ BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
- if (!mod_timer(&sk->timer, jiffies + timeout))
+ if (!mod_timer(&sk->sk_timer, jiffies + timeout))
sock_hold(sk);
}
static void sco_sock_clear_timer(struct sock *sk)
{
- BT_DBG("sock %p state %d", sk, sk->state);
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
- if (timer_pending(&sk->timer) && del_timer(&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
__sock_put(sk);
}
static void sco_sock_init_timer(struct sock *sk)
{
- init_timer(&sk->timer);
- sk->timer.function = sco_sock_timeout;
- sk->timer.data = (unsigned long)sk;
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.function = sco_sock_timeout;
+ sk->sk_timer.data = (unsigned long)sk;
}
/* ---- SCO connections ---- */
@@ -232,10 +232,10 @@ int sco_connect(struct sock *sk)
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
} else {
- sk->state = BT_CONNECT;
- sco_sock_set_timer(sk, sk->sndtimeo);
+ sk->sk_state = BT_CONNECT;
+ sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock_bh(hdev);
@@ -283,7 +283,7 @@ static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
BT_DBG("sk %p len %d", sk, skb->len);
- if (sk->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
@@ -299,7 +299,7 @@ static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
{
struct sock *sk;
- for (sk = sco_sk_list.head; sk; sk = sk->next) {
+ for (sk = sco_sk_list.head; sk; sk = sk->sk_next) {
if (!bacmp(&bt_sk(sk)->src, ba))
break;
}
@@ -316,8 +316,8 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
read_lock(&sco_sk_list.lock);
- for (sk = sco_sk_list.head; sk; sk = sk->next) {
- if (sk->state != BT_LISTEN)
+ for (sk = sco_sk_list.head; sk; sk = sk->sk_next) {
+ if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
@@ -338,11 +338,11 @@ static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
static void sco_sock_cleanup_listen(struct sock *parent)
@@ -355,8 +355,8 @@ static void sco_sock_cleanup_listen(struct sock *parent)
while ((sk = bt_accept_dequeue(parent, NULL)))
sco_sock_close(sk);
- parent->state = BT_CLOSED;
- parent->zapped = 1;
+ parent->sk_state = BT_CLOSED;
+ parent->sk_zapped = 1;
}
/* Kill socket (only if zapped and orphan)
@@ -364,10 +364,10 @@ static void sco_sock_cleanup_listen(struct sock *parent)
*/
static void sco_sock_kill(struct sock *sk)
{
- if (!sk->zapped || sk->socket)
+ if (!sk->sk_zapped || sk->sk_socket)
return;
- BT_DBG("sk %p state %d", sk, sk->state);
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
@@ -388,9 +388,9 @@ static void sco_sock_close(struct sock *sk)
conn = sco_pi(sk)->conn;
- BT_DBG("sk %p state %d conn %p socket %p", sk, sk->state, conn, sk->socket);
+ BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket);
- switch (sk->state) {
+ switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
@@ -403,7 +403,7 @@ static void sco_sock_close(struct sock *sk)
break;
default:
- sk->zapped = 1;
+ sk->sk_zapped = 1;
break;
};
@@ -417,7 +417,7 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
BT_DBG("sk %p", sk);
if (parent)
- sk->type = parent->type;
+ sk->sk_type = parent->sk_type;
}
static struct sock *sco_sock_alloc(struct socket *sock, int proto, int prio)
@@ -430,9 +430,9 @@ static struct sock *sco_sock_alloc(struct socket *sock, int proto, int prio)
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = sco_sock_destruct;
- sk->sndtimeo = SCO_CONN_TIMEOUT;
- sk->state = BT_OPEN;
+ sk->sk_destruct = sco_sock_destruct;
+ sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
+ sk->sk_state = BT_OPEN;
sco_sock_init_timer(sk);
@@ -474,7 +474,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
lock_sock(sk);
- if (sk->state != BT_OPEN) {
+ if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
@@ -486,7 +486,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
- sk->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
}
write_unlock_bh(&sco_sk_list.lock);
@@ -508,10 +508,10 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco))
return -EINVAL;
- if (sk->state != BT_OPEN && sk->state != BT_BOUND)
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
- if (sk->type != SOCK_SEQPACKET)
+ if (sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
@@ -538,14 +538,14 @@ int sco_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if (sk->state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = BT_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
@@ -561,7 +561,7 @@ int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
@@ -571,7 +571,7 @@ int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
@@ -583,7 +583,7 @@ int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
timeo = schedule_timeout(timeo);
lock_sock(sk);
- if (sk->state != BT_LISTEN) {
+ if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
@@ -594,7 +594,7 @@ int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
}
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
if (err)
goto done;
@@ -633,7 +633,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
BT_DBG("sock %p, sk %p", sock, sk);
- if (sk->err)
+ if (sk->sk_err)
return sock_error(sk);
if (msg->msg_flags & MSG_OOB)
@@ -641,7 +641,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
lock_sock(sk);
- if (sk->state == BT_CONNECTED)
+ if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
@@ -685,7 +685,7 @@ int sco_sock_getsockopt(struct socket *sock, int level, int optname, char *optva
switch (optname) {
case SCO_OPTIONS:
- if (sk->state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
@@ -701,7 +701,7 @@ int sco_sock_getsockopt(struct socket *sock, int level, int optname, char *optva
break;
case SCO_CONNINFO:
- if (sk->state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
@@ -767,11 +767,11 @@ static void sco_chan_del(struct sock *sk, int err)
hci_conn_put(conn->hcon);
}
- sk->state = BT_CLOSED;
- sk->err = err;
- sk->state_change(sk);
+ sk->sk_state = BT_CLOSED;
+ sk->sk_err = err;
+ sk->sk_state_change(sk);
- sk->zapped = 1;
+ sk->sk_zapped = 1;
}
static void sco_conn_ready(struct sco_conn *conn)
@@ -785,8 +785,8 @@ static void sco_conn_ready(struct sco_conn *conn)
if ((sk = conn->sk)) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
- sk->state = BT_CONNECTED;
- sk->state_change(sk);
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
parent = sco_get_sock_listen(conn->src);
@@ -809,10 +809,10 @@ static void sco_conn_ready(struct sco_conn *conn)
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
- sk->state = BT_CONNECTED;
+ sk->sk_state = BT_CONNECTED;
/* Wake up parent */
- parent->data_ready(parent, 1);
+ parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
}
@@ -888,7 +888,7 @@ static void *sco_seq_start(struct seq_file *seq, loff_t *pos)
read_lock_bh(&sco_sk_list.lock);
- for (sk = sco_sk_list.head; sk; sk = sk->next)
+ for (sk = sco_sk_list.head; sk; sk = sk->sk_next)
if (!l--)
return sk;
return NULL;
@@ -898,7 +898,7 @@ static void *sco_seq_next(struct seq_file *seq, void *e, loff_t *pos)
{
struct sock *sk = e;
(*pos)++;
- return sk->next;
+ return sk->sk_next;
}
static void sco_seq_stop(struct seq_file *seq, void *e)
@@ -910,7 +910,7 @@ static int sco_seq_show(struct seq_file *seq, void *e)
{
struct sock *sk = e;
seq_printf(seq, "%s %s %d\n",
- batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->state);
+ batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state);
return 0;
}
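One detail worth noting in the sco.c timer hunks: the rename keeps a reference-count pairing intact. Arming the timer takes a socket reference only when mod_timer() reports the timer was not already pending, and clearing it drops that reference only when a pending timer was actually deleted. A toy model of the pairing, with stand-in state instead of the kernel's timer and refcount APIs:

/* Illustrative model of the hold/put pairing around sk_timer above;
 * refs and timer_pending are toy fields, not kernel APIs. */
struct toy_sock { int refs; int timer_pending; };

static void set_timer(struct toy_sock *sk)
{
	/* mod_timer() returns 0 when the timer was not already pending;
	 * only then does the timer own a new reference. */
	int was_pending = sk->timer_pending;
	sk->timer_pending = 1;
	if (!was_pending)
		sk->refs++;		/* sock_hold(sk) */
}

static void clear_timer(struct toy_sock *sk)
{
	/* drop the timer's reference only if we actually cancelled it */
	if (sk->timer_pending) {
		sk->timer_pending = 0;
		sk->refs--;		/* __sock_put(sk) */
	}
}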
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 7474e087fa12..16eba2c60986 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -59,7 +59,7 @@
*/
static inline int connection_based(struct sock *sk)
{
- return sk->type == SOCK_SEQPACKET || sk->type == SOCK_STREAM;
+ return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
/*
@@ -70,26 +70,26 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
int error;
DEFINE_WAIT(wait);
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
if (error)
goto out_err;
- if (!skb_queue_empty(&sk->receive_queue))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
goto out;
/* Socket shut down? */
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
goto out_noerr;
/* Sequenced packets can come disconnected.
* If so we report the problem
*/
error = -ENOTCONN;
- if (connection_based(sk) && !(sk->state == TCP_ESTABLISHED ||
- sk->state == TCP_LISTEN))
+ if (connection_based(sk) &&
+ !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
goto out_err;
/* handle signals */
@@ -99,7 +99,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
error = 0;
*timeo_p = schedule_timeout(*timeo_p);
out:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return error;
interrupted:
error = sock_intr_errno(*timeo_p);
@@ -146,7 +146,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
{
struct sk_buff *skb;
long timeo;
- /* Caller is allowed not to check sk->err before skb_recv_datagram() */
+ /*
+ * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
+ */
int error = sock_error(sk);
if (error)
@@ -164,14 +166,15 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
if (flags & MSG_PEEK) {
unsigned long cpu_flags;
- spin_lock_irqsave(&sk->receive_queue.lock, cpu_flags);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irqsave(&sk->sk_receive_queue.lock,
+ cpu_flags);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
atomic_inc(&skb->users);
- spin_unlock_irqrestore(&sk->receive_queue.lock,
+ spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
cpu_flags);
} else
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
return skb;
@@ -451,26 +454,26 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
- if (sk->err || !skb_queue_empty(&sk->error_queue))
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->receive_queue) ||
- (sk->shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
if (connection_based(sk)) {
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/* connection hasn't started yet? */
- if (sk->state == TCP_SYN_SENT)
+ if (sk->sk_state == TCP_SYN_SENT)
return mask;
}
@@ -478,7 +481,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
return mask;
}
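datagram_poll() above carries the same readiness rules over to the new names: POLLERR for a pending socket error, POLLHUP for a fully shut-down or closed connection, POLLIN when the receive queue is non-empty or receives are shut down. A small userspace sketch of consuming such a mask through poll(2); illustrative only, not kernel code:

#include <poll.h>
#include <stdio.h>

/* Returns 1 when fd is readable, 0 on timeout or if nothing to read. */
static int wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return 0;		/* timeout or poll() error */
	if (pfd.revents & (POLLERR | POLLHUP))
		fprintf(stderr, "socket error or hangup\n");
	return (pfd.revents & POLLIN) != 0;
}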
diff --git a/net/core/filter.c b/net/core/filter.c
index dfa0279e30a7..3bb1e4669ba5 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -414,10 +414,10 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (!err) {
struct sk_filter *old_fp;
- spin_lock_bh(&sk->lock.slock);
- old_fp = sk->filter;
- sk->filter = fp;
- spin_unlock_bh(&sk->lock.slock);
+ spin_lock_bh(&sk->sk_lock.slock);
+ old_fp = sk->sk_filter;
+ sk->sk_filter = fp;
+ spin_unlock_bh(&sk->sk_lock.slock);
fp = old_fp;
}
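sk_attach_filter() above swaps the filter pointer while holding sk_lock.slock and releases the old filter only after the new one is published. A minimal userspace sketch of that publish-under-lock pattern, with a mutex and toy types standing in for the kernel's spinlock and sk_filter:

#include <pthread.h>
#include <stdlib.h>

struct filter { int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct filter *active;	/* stand-in for sk->sk_filter */

static void attach_filter(struct filter *fp)
{
	struct filter *old;

	pthread_mutex_lock(&lock);
	old = active;		/* read and replace under the lock */
	active = fp;
	pthread_mutex_unlock(&lock);

	free(old);		/* release the old filter outside the lock */
}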
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
index ab56416b7b3a..1c08a5bfae90 100644
--- a/net/core/netfilter.c
+++ b/net/core/netfilter.c
@@ -633,7 +633,7 @@ int ip_route_me_harder(struct sk_buff **pskb)
.fwmark = (*pskb)->nfmark
#endif
} },
- .oif = (*pskb)->sk ? (*pskb)->sk->bound_dev_if : 0,
+ .oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0,
};
struct net_device *dev_src = NULL;
int err;
@@ -648,7 +648,7 @@ int ip_route_me_harder(struct sk_buff **pskb)
if ((err=ip_route_output_key(&rt, &fl)) != 0) {
printk("route_me_harder: ip_route_output_key(dst=%u.%u.%u.%u, src=%u.%u.%u.%u, oif=%d, tos=0x%x, fwmark=0x%lx) error %d\n",
NIPQUAD(iph->daddr), NIPQUAD(iph->saddr),
- (*pskb)->sk ? (*pskb)->sk->bound_dev_if : 0,
+ (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0,
RT_TOS(iph->tos)|RTO_CONN,
#ifdef CONFIG_IP_ROUTE_FWMARK
(*pskb)->nfmark,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cc98f82f7c92..32e64ed866a3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -490,10 +490,11 @@ static void rtnetlink_rcv(struct sock *sk, int len)
if (rtnl_shlock_nowait())
return;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (rtnetlink_rcv_skb(skb)) {
if (skb->len)
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue,
+ skb);
else
kfree_skb(skb);
break;
@@ -504,7 +505,7 @@ static void rtnetlink_rcv(struct sock *sk, int len)
up(&rtnl_sem);
netdev_run_todo();
- } while (rtnl && rtnl->receive_queue.qlen);
+ } while (rtnl && rtnl->sk_receive_queue.qlen);
}
static struct rtnetlink_link link_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
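rtnetlink_rcv() above drains the receive queue and, when a message is only partially consumed, pushes the remainder back on the head before stopping. A toy version of that drain-with-requeue loop (the list and the "consume" step are illustrative stand-ins, not sk_buff handling):

struct msg { struct msg *next; int remaining; };

static void drain(struct msg **head)
{
	struct msg *m;

	while ((m = *head) != NULL) {
		*head = m->next;	/* dequeue from the head */
		m->remaining -= 1;	/* stand-in for rtnetlink_rcv_skb() */
		if (m->remaining > 0) {
			m->next = *head;	/* requeue the remainder */
			*head = m;
			break;
		}
		/* fully consumed: the kernel would kfree_skb() here */
	}
}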
diff --git a/net/core/sock.c b/net/core/sock.c
index 98bcc95a15e4..afa73193620c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -202,17 +202,17 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
ret = -EACCES;
}
else
- sk->debug=valbool;
+ sk->sk_debug = valbool;
break;
case SO_REUSEADDR:
- sk->reuse = valbool;
+ sk->sk_reuse = valbool;
break;
case SO_TYPE:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
- sk->localroute=valbool;
+ sk->sk_localroute = valbool;
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -226,17 +226,17 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
if (val > sysctl_wmem_max)
val = sysctl_wmem_max;
- sk->userlocks |= SOCK_SNDBUF_LOCK;
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
if ((val * 2) < SOCK_MIN_SNDBUF)
- sk->sndbuf = SOCK_MIN_SNDBUF;
+ sk->sk_sndbuf = SOCK_MIN_SNDBUF;
else
- sk->sndbuf = (val * 2);
+ sk->sk_sndbuf = val * 2;
/*
* Wake up sending tasks if we
* upped the value.
*/
- sk->write_space(sk);
+ sk->sk_write_space(sk);
break;
case SO_RCVBUF:
@@ -248,20 +248,18 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
if (val > sysctl_rmem_max)
val = sysctl_rmem_max;
- sk->userlocks |= SOCK_RCVBUF_LOCK;
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/* FIXME: is this lower bound the right one? */
if ((val * 2) < SOCK_MIN_RCVBUF)
- sk->rcvbuf = SOCK_MIN_RCVBUF;
+ sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
else
- sk->rcvbuf = (val * 2);
+ sk->sk_rcvbuf = val * 2;
break;
case SO_KEEPALIVE:
#ifdef CONFIG_INET
- if (sk->protocol == IPPROTO_TCP)
- {
+ if (sk->sk_protocol == IPPROTO_TCP)
tcp_set_keepalive(sk, valbool);
- }
#endif
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;
@@ -271,12 +269,12 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_NO_CHECK:
- sk->no_check = valbool;
+ sk->sk_no_check = valbool;
break;
case SO_PRIORITY:
if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
- sk->priority = val;
+ sk->sk_priority = val;
else
ret = -EPERM;
break;
@@ -295,10 +293,10 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
else {
#if (BITS_PER_LONG == 32)
if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
- sk->lingertime=MAX_SCHEDULE_TIMEOUT;
+ sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
else
#endif
- sk->lingertime=ling.l_linger*HZ;
+ sk->sk_lingertime = ling.l_linger * HZ;
sock_set_flag(sk, SOCK_LINGER);
}
break;
@@ -312,21 +310,21 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_TIMESTAMP:
- sk->rcvtstamp = valbool;
+ sk->sk_rcvtstamp = valbool;
break;
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
- sk->rcvlowat = val ? : 1;
+ sk->sk_rcvlowat = val ? : 1;
break;
case SO_RCVTIMEO:
- ret = sock_set_timeout(&sk->rcvtimeo, optval, optlen);
+ ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
break;
case SO_SNDTIMEO:
- ret = sock_set_timeout(&sk->sndtimeo, optval, optlen);
+ ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
break;
#ifdef CONFIG_NETDEVICES
@@ -347,7 +345,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
*/
if (!valbool) {
- sk->bound_dev_if = 0;
+ sk->sk_bound_dev_if = 0;
} else {
if (optlen > IFNAMSIZ)
optlen = IFNAMSIZ;
@@ -360,14 +358,14 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
sk_dst_reset(sk);
if (devname[0] == '\0') {
- sk->bound_dev_if = 0;
+ sk->sk_bound_dev_if = 0;
} else {
struct net_device *dev = dev_get_by_name(devname);
if (!dev) {
ret = -ENODEV;
break;
}
- sk->bound_dev_if = dev->ifindex;
+ sk->sk_bound_dev_if = dev->ifindex;
dev_put(dev);
}
}
@@ -390,15 +388,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_DETACH_FILTER:
- spin_lock_bh(&sk->lock.slock);
- filter = sk->filter;
+ spin_lock_bh(&sk->sk_lock.slock);
+ filter = sk->sk_filter;
if (filter) {
- sk->filter = NULL;
- spin_unlock_bh(&sk->lock.slock);
+ sk->sk_filter = NULL;
+ spin_unlock_bh(&sk->sk_lock.slock);
sk_filter_release(sk, filter);
break;
}
- spin_unlock_bh(&sk->lock.slock);
+ spin_unlock_bh(&sk->sk_lock.slock);
ret = -ENONET;
break;
@@ -435,11 +433,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
switch(optname)
{
case SO_DEBUG:
- v.val = sk->debug;
+ v.val = sk->sk_debug;
break;
case SO_DONTROUTE:
- v.val = sk->localroute;
+ v.val = sk->sk_localroute;
break;
case SO_BROADCAST:
@@ -447,15 +445,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_SNDBUF:
- v.val=sk->sndbuf;
+ v.val = sk->sk_sndbuf;
break;
case SO_RCVBUF:
- v.val =sk->rcvbuf;
+ v.val = sk->sk_rcvbuf;
break;
case SO_REUSEADDR:
- v.val = sk->reuse;
+ v.val = sk->sk_reuse;
break;
case SO_KEEPALIVE:
@@ -463,13 +461,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_TYPE:
- v.val = sk->type;
+ v.val = sk->sk_type;
break;
case SO_ERROR:
v.val = -sock_error(sk);
if(v.val==0)
- v.val=xchg(&sk->err_soft,0);
+ v.val = xchg(&sk->sk_err_soft, 0);
break;
case SO_OOBINLINE:
@@ -477,17 +475,17 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_NO_CHECK:
- v.val = sk->no_check;
+ v.val = sk->sk_no_check;
break;
case SO_PRIORITY:
- v.val = sk->priority;
+ v.val = sk->sk_priority;
break;
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
- v.ling.l_linger = sk->lingertime / HZ;
+ v.ling.l_linger = sk->sk_lingertime / HZ;
break;
case SO_BSDCOMPAT:
@@ -495,33 +493,33 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_TIMESTAMP:
- v.val = sk->rcvtstamp;
+ v.val = sk->sk_rcvtstamp;
break;
case SO_RCVTIMEO:
lv=sizeof(struct timeval);
- if (sk->rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
+ if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
- v.tm.tv_sec = sk->rcvtimeo/HZ;
- v.tm.tv_usec = ((sk->rcvtimeo%HZ)*1000)/HZ;
+ v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000) / HZ;
}
break;
case SO_SNDTIMEO:
lv=sizeof(struct timeval);
- if (sk->sndtimeo == MAX_SCHEDULE_TIMEOUT) {
+ if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
- v.tm.tv_sec = sk->sndtimeo/HZ;
- v.tm.tv_usec = ((sk->sndtimeo%HZ)*1000)/HZ;
+ v.tm.tv_sec = sk->sk_sndtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000) / HZ;
}
break;
case SO_RCVLOWAT:
- v.val = sk->rcvlowat;
+ v.val = sk->sk_rcvlowat;
break;
case SO_SNDLOWAT:
@@ -533,9 +531,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_PEERCRED:
- if (len > sizeof(sk->peercred))
- len = sizeof(sk->peercred);
- if (copy_to_user(optval, &sk->peercred, len))
+ if (len > sizeof(sk->sk_peercred))
+ len = sizeof(sk->sk_peercred);
+ if (copy_to_user(optval, &sk->sk_peercred, len))
return -EFAULT;
goto lenout;
@@ -556,7 +554,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
* the UNIX standard wants it for whatever reason... -DaveM
*/
case SO_ACCEPTCONN:
- v.val = (sk->state == TCP_LISTEN);
+ v.val = sk->sk_state == TCP_LISTEN;
break;
default:
@@ -597,10 +595,10 @@ struct sock *sk_alloc(int family, int priority, int zero_it, kmem_cache_t *slab)
if (zero_it) {
memset(sk, 0,
zero_it == 1 ? sizeof(struct sock) : zero_it);
- sk->family = family;
+ sk->sk_family = family;
sock_lock_init(sk);
}
- sk->slab = slab;
+ sk->sk_slab = slab;
}
return sk;
}
@@ -608,21 +606,22 @@ struct sock *sk_alloc(int family, int priority, int zero_it, kmem_cache_t *slab)
void sk_free(struct sock *sk)
{
struct sk_filter *filter;
- struct module *owner = sk->owner;
+ struct module *owner = sk->sk_owner;
- if (sk->destruct)
- sk->destruct(sk);
+ if (sk->sk_destruct)
+ sk->sk_destruct(sk);
- filter = sk->filter;
+ filter = sk->sk_filter;
if (filter) {
sk_filter_release(sk, filter);
- sk->filter = NULL;
+ sk->sk_filter = NULL;
}
- if (atomic_read(&sk->omem_alloc))
- printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));
+ if (atomic_read(&sk->sk_omem_alloc))
+ printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
+ __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
- kmem_cache_free(sk->slab, sk);
+ kmem_cache_free(sk->sk_slab, sk);
module_put(owner);
}
@@ -657,9 +656,9 @@ void sock_wfree(struct sk_buff *skb)
struct sock *sk = skb->sk;
/* In case it might be waiting for more memory. */
- atomic_sub(skb->truesize, &sk->wmem_alloc);
- if (!sk->use_write_queue)
- sk->write_space(sk);
+ atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+ if (!sk->sk_use_write_queue)
+ sk->sk_write_space(sk);
sock_put(sk);
}
@@ -670,7 +669,7 @@ void sock_rfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
- atomic_sub(skb->truesize, &sk->rmem_alloc);
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
/*
@@ -678,7 +677,7 @@ void sock_rfree(struct sk_buff *skb)
*/
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
- if (force || atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
+ if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
struct sk_buff * skb = alloc_skb(size, priority);
if (skb) {
skb_set_owner_w(skb, sk);
@@ -693,7 +692,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
*/
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
- if (force || atomic_read(&sk->rmem_alloc) < sk->rcvbuf) {
+ if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(size, priority);
if (skb) {
skb_set_owner_r(skb, sk);
@@ -709,16 +708,16 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
void *sock_kmalloc(struct sock *sk, int size, int priority)
{
if ((unsigned)size <= sysctl_optmem_max &&
- atomic_read(&sk->omem_alloc)+size < sysctl_optmem_max) {
+ atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
void *mem;
/* First do the add, to avoid the race if kmalloc
* might sleep.
*/
- atomic_add(size, &sk->omem_alloc);
+ atomic_add(size, &sk->sk_omem_alloc);
mem = kmalloc(size, priority);
if (mem)
return mem;
- atomic_sub(size, &sk->omem_alloc);
+ atomic_sub(size, &sk->sk_omem_alloc);
}
return NULL;
}
@@ -729,7 +728,7 @@ void *sock_kmalloc(struct sock *sk, int size, int priority)
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
kfree(mem);
- atomic_sub(size, &sk->omem_alloc);
+ atomic_sub(size, &sk->sk_omem_alloc);
}
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
@@ -739,23 +738,23 @@ static long sock_wait_for_wmem(struct sock * sk, long timeo)
{
DEFINE_WAIT(wait);
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
for (;;) {
if (!timeo)
break;
if (signal_pending(current))
break;
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
- if (atomic_read(&sk->wmem_alloc) < sk->sndbuf)
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
break;
- if (sk->shutdown & SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
break;
- if (sk->err)
+ if (sk->sk_err)
break;
timeo = schedule_timeout(timeo);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return timeo;
}
@@ -772,7 +771,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
long timeo;
int err;
- gfp_mask = sk->allocation;
+ gfp_mask = sk->sk_allocation;
if (gfp_mask & __GFP_WAIT)
gfp_mask |= __GFP_REPEAT;
@@ -783,11 +782,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
goto failure;
err = -EPIPE;
- if (sk->shutdown & SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
- skb = alloc_skb(header_len, sk->allocation);
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, sk->sk_allocation);
if (skb) {
int npages;
int i;
@@ -803,7 +802,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
struct page *page;
skb_frag_t *frag;
- page = alloc_pages(sk->allocation, 0);
+ page = alloc_pages(sk->sk_allocation, 0);
if (!page) {
err = -ENOBUFS;
skb_shinfo(skb)->nr_frags = i;
@@ -826,8 +825,8 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
err = -ENOBUFS;
goto failure;
}
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = -EAGAIN;
if (!timeo)
goto failure;
@@ -857,35 +856,35 @@ void __lock_sock(struct sock *sk)
DEFINE_WAIT(wait);
for(;;) {
- prepare_to_wait_exclusive(&sk->lock.wq, &wait,
+ prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
TASK_UNINTERRUPTIBLE);
- spin_unlock_bh(&sk->lock.slock);
+ spin_unlock_bh(&sk->sk_lock.slock);
schedule();
- spin_lock_bh(&sk->lock.slock);
+ spin_lock_bh(&sk->sk_lock.slock);
if(!sock_owned_by_user(sk))
break;
}
- finish_wait(&sk->lock.wq, &wait);
+ finish_wait(&sk->sk_lock.wq, &wait);
}
void __release_sock(struct sock *sk)
{
- struct sk_buff *skb = sk->backlog.head;
+ struct sk_buff *skb = sk->sk_backlog.head;
do {
- sk->backlog.head = sk->backlog.tail = NULL;
+ sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
bh_unlock_sock(sk);
do {
struct sk_buff *next = skb->next;
skb->next = NULL;
- sk->backlog_rcv(sk, skb);
+ sk->sk_backlog_rcv(sk, skb);
skb = next;
} while (skb != NULL);
bh_lock_sock(sk);
- } while((skb = sk->backlog.head) != NULL);
+ } while((skb = sk->sk_backlog.head) != NULL);
}
/*
@@ -1014,101 +1013,101 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, siz
void sock_def_wakeup(struct sock *sk)
{
- read_lock(&sk->callback_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
- read_unlock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_error_report(struct sock *sk)
{
- read_lock(&sk->callback_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk,0,POLL_ERR);
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_readable(struct sock *sk, int len)
{
- read_lock(&sk->callback_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk,1,POLL_IN);
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_write_space(struct sock *sk)
{
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
- if((atomic_read(&sk->wmem_alloc) << 1) <= sk->sndbuf) {
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, 2, POLL_OUT);
}
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
void sock_def_destruct(struct sock *sk)
{
- if (sk->protinfo)
- kfree(sk->protinfo);
+ if (sk->sk_protinfo)
+ kfree(sk->sk_protinfo);
}
void sk_send_sigurg(struct sock *sk)
{
- if (sk->socket && sk->socket->file)
- if (send_sigurg(&sk->socket->file->f_owner))
+ if (sk->sk_socket && sk->sk_socket->file)
+ if (send_sigurg(&sk->sk_socket->file->f_owner))
sk_wake_async(sk, 3, POLL_PRI);
}
void sock_init_data(struct socket *sock, struct sock *sk)
{
- skb_queue_head_init(&sk->receive_queue);
- skb_queue_head_init(&sk->write_queue);
- skb_queue_head_init(&sk->error_queue);
+ skb_queue_head_init(&sk->sk_receive_queue);
+ skb_queue_head_init(&sk->sk_write_queue);
+ skb_queue_head_init(&sk->sk_error_queue);
- init_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
- sk->allocation = GFP_KERNEL;
- sk->rcvbuf = sysctl_rmem_default;
- sk->sndbuf = sysctl_wmem_default;
- sk->state = TCP_CLOSE;
- sk->zapped = 1;
- sk->socket = sock;
+ sk->sk_allocation = GFP_KERNEL;
+ sk->sk_rcvbuf = sysctl_rmem_default;
+ sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_zapped = 1;
+ sk->sk_socket = sock;
if(sock)
{
- sk->type = sock->type;
- sk->sleep = &sock->wait;
+ sk->sk_type = sock->type;
+ sk->sk_sleep = &sock->wait;
sock->sk = sk;
} else
- sk->sleep = NULL;
-
- sk->dst_lock = RW_LOCK_UNLOCKED;
- sk->callback_lock = RW_LOCK_UNLOCKED;
-
- sk->state_change = sock_def_wakeup;
- sk->data_ready = sock_def_readable;
- sk->write_space = sock_def_write_space;
- sk->error_report = sock_def_error_report;
- sk->destruct = sock_def_destruct;
-
- sk->peercred.pid = 0;
- sk->peercred.uid = -1;
- sk->peercred.gid = -1;
- sk->rcvlowat = 1;
- sk->rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- sk->sndtimeo = MAX_SCHEDULE_TIMEOUT;
- sk->owner = NULL;
-
- atomic_set(&sk->refcnt, 1);
+ sk->sk_sleep = NULL;
+
+ sk->sk_dst_lock = RW_LOCK_UNLOCKED;
+ sk->sk_callback_lock = RW_LOCK_UNLOCKED;
+
+ sk->sk_state_change = sock_def_wakeup;
+ sk->sk_data_ready = sock_def_readable;
+ sk->sk_write_space = sock_def_write_space;
+ sk->sk_error_report = sock_def_error_report;
+ sk->sk_destruct = sock_def_destruct;
+
+ sk->sk_peercred.pid = 0;
+ sk->sk_peercred.uid = -1;
+ sk->sk_peercred.gid = -1;
+ sk->sk_rcvlowat = 1;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_owner = NULL;
+
+ atomic_set(&sk->sk_refcnt, 1);
}
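The "significant progress" rule in sock_def_write_space() above only wakes writers once at least half the send buffer is free, i.e. (wmem_alloc << 1) <= sndbuf. A standalone arithmetic sketch of that threshold; the sample values are illustrative:

#include <stdio.h>

static int writer_should_wake(unsigned int wmem_alloc, unsigned int sndbuf)
{
	return (wmem_alloc << 1) <= sndbuf;	/* wmem_alloc <= sndbuf / 2 */
}

int main(void)
{
	printf("%d\n", writer_should_wake(8192, 65536));	/* 1: half free */
	printf("%d\n", writer_should_wake(40000, 65536));	/* 0: not yet */
	return 0;
}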
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 358daa516cf8..a58319be47bc 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -180,7 +180,7 @@ static int check_port(unsigned short port)
struct dn_scp *scp = DN_SK(sk);
if (scp->addrloc == port)
return -1;
- sk = sk->next;
+ sk = sk->sk_next;
}
return 0;
}
@@ -212,9 +212,9 @@ static int dn_hash_sock(struct sock *sk)
struct sock **skp;
int rv = -EUSERS;
- if (sk->next)
+ if (sk->sk_next)
BUG();
- if (sk->pprev)
+ if (sk->sk_pprev)
BUG();
write_lock_bh(&dn_hash_lock);
@@ -226,8 +226,8 @@ static int dn_hash_sock(struct sock *sk)
if ((skp = dn_find_list(sk)) == NULL)
goto out;
- sk->next = *skp;
- sk->pprev = skp;
+ sk->sk_next = *skp;
+ sk->sk_pprev = skp;
*skp = sk;
rv = 0;
out:
@@ -237,36 +237,36 @@ out:
static void dn_unhash_sock(struct sock *sk)
{
- struct sock **skp = sk->pprev;
+ struct sock **skp = sk->sk_pprev;
if (skp == NULL)
return;
write_lock(&dn_hash_lock);
while(*skp != sk)
- skp = &((*skp)->next);
- *skp = sk->next;
+ skp = &((*skp)->sk_next);
+ *skp = sk->sk_next;
write_unlock(&dn_hash_lock);
- sk->next = NULL;
- sk->pprev = NULL;
+ sk->sk_next = NULL;
+ sk->sk_pprev = NULL;
}
static void dn_unhash_sock_bh(struct sock *sk)
{
- struct sock **skp = sk->pprev;
+ struct sock **skp = sk->sk_pprev;
if (skp == NULL)
return;
write_lock_bh(&dn_hash_lock);
while(*skp != sk)
- skp = &((*skp)->next);
- *skp = sk->next;
+ skp = &((*skp)->sk_next);
+ *skp = sk->sk_next;
write_unlock_bh(&dn_hash_lock);
- sk->next = NULL;
- sk->pprev = NULL;
+ sk->sk_next = NULL;
+ sk->sk_pprev = NULL;
}
struct sock **listen_hash(struct sockaddr_dn *addr)
@@ -292,7 +292,7 @@ struct sock **listen_hash(struct sockaddr_dn *addr)
*/
static void dn_rehash_sock(struct sock *sk)
{
- struct sock **skp = sk->pprev;
+ struct sock **skp = sk->sk_pprev;
struct dn_scp *scp = DN_SK(sk);
if (scp->addr.sdn_flags & SDF_WILD)
@@ -300,14 +300,14 @@ static void dn_rehash_sock(struct sock *sk)
write_lock_bh(&dn_hash_lock);
while(*skp != sk)
- skp = &((*skp)->next);
- *skp = sk->next;
+ skp = &((*skp)->sk_next);
+ *skp = sk->sk_next;
DN_SK(sk)->addrloc = 0;
skp = listen_hash(&DN_SK(sk)->addr);
- sk->next = *skp;
- sk->pprev = skp;
+ sk->sk_next = *skp;
+ sk->sk_pprev = skp;
*skp = sk;
write_unlock_bh(&dn_hash_lock);
}
@@ -405,9 +405,9 @@ struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
struct sock *sk;
read_lock(&dn_hash_lock);
- for(sk = *skp; sk != NULL; sk = sk->next) {
+ for(sk = *skp; sk; sk = sk->sk_next) {
struct dn_scp *scp = DN_SK(sk);
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
continue;
if (scp->addr.sdn_objnum) {
if (scp->addr.sdn_objnum != addr->sdn_objnum)
@@ -425,7 +425,7 @@ struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
return sk;
}
- if (dn_wild_sk && (dn_wild_sk->state == TCP_LISTEN))
+ if (dn_wild_sk && (dn_wild_sk->sk_state == TCP_LISTEN))
sock_hold((sk = dn_wild_sk));
read_unlock(&dn_hash_lock);
@@ -440,7 +440,7 @@ struct sock *dn_find_by_skb(struct sk_buff *skb)
read_lock(&dn_hash_lock);
sk = dn_sk_hash[cb->dst_port & DN_SK_HASH_MASK];
- for (; sk != NULL; sk = sk->next) {
+ for (; sk; sk = sk->sk_next) {
scp = DN_SK(sk);
if (cb->src != dn_saddr2dn(&scp->peer))
continue;
@@ -469,7 +469,7 @@ static void dn_destruct(struct sock *sk)
skb_queue_purge(&scp->other_xmit_queue);
skb_queue_purge(&scp->other_receive_queue);
- dst_release(xchg(&sk->dst_cache, NULL));
+ dst_release(xchg(&sk->sk_dst_cache, NULL));
}
struct sock *dn_alloc_sock(struct socket *sock, int gfp)
@@ -488,12 +488,12 @@ struct sock *dn_alloc_sock(struct socket *sock, int gfp)
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->backlog_rcv = dn_nsp_backlog_rcv;
- sk->destruct = dn_destruct;
- sk->no_check = 1;
- sk->family = PF_DECnet;
- sk->protocol = 0;
- sk->allocation = gfp;
+ sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
+ sk->sk_destruct = dn_destruct;
+ sk->sk_no_check = 1;
+ sk->sk_family = PF_DECnet;
+ sk->sk_protocol = 0;
+ sk->sk_allocation = gfp;
/* Initialization of DECnet Session Control Port */
scp->state = DN_O; /* Open */
@@ -600,7 +600,7 @@ int dn_destroy_timer(struct sock *sk)
scp->persist = (HZ * decnet_time_wait);
- if (sk->socket)
+ if (sk->sk_socket)
return 0;
dn_stop_fast_timer(sk); /* unlikely, but possible that this is running */
@@ -619,16 +619,17 @@ static void dn_destroy_sock(struct sock *sk)
scp->nsp_rxtshift = 0; /* reset back off */
- if (sk->socket) {
- if (sk->socket->state != SS_UNCONNECTED)
- sk->socket->state = SS_DISCONNECTING;
+ if (sk->sk_socket) {
+ if (sk->sk_socket->state != SS_UNCONNECTED)
+ sk->sk_socket->state = SS_DISCONNECTING;
}
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
switch(scp->state) {
case DN_DN:
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, sk->allocation);
+ dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
+ sk->sk_allocation);
scp->persist_fxn = dn_destroy_timer;
scp->persist = dn_nsp_persist(sk);
break;
@@ -640,7 +641,7 @@ static void dn_destroy_sock(struct sock *sk)
case DN_DI:
case DN_DR:
disc_reject:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->allocation);
+ dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
case DN_NC:
case DN_NR:
case DN_RJ:
@@ -697,7 +698,7 @@ static int dn_create(struct socket *sock, int protocol)
if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL)
return -ENOBUFS;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
return 0;
}
@@ -777,13 +778,13 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
rv = -EINVAL;
lock_sock(sk);
- if (sk->zapped != 0) {
+ if (sk->sk_zapped) {
memcpy(&scp->addr, saddr, addr_len);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
rv = dn_hash_sock(sk);
if (rv) {
- sk->zapped = 1;
+ sk->sk_zapped = 1;
}
}
release_sock(sk);
@@ -798,7 +799,7 @@ static int dn_auto_bind(struct socket *sock)
struct dn_scp *scp = DN_SK(sk);
int rv;
- sk->zapped = 0;
+ sk->sk_zapped = 0;
scp->addr.sdn_flags = 0;
scp->addr.sdn_objnum = 0;
@@ -823,7 +824,7 @@ static int dn_auto_bind(struct socket *sock)
if (rv == 0) {
rv = dn_hash_sock(sk);
if (rv) {
- sk->zapped = 1;
+ sk->sk_zapped = 1;
}
}
@@ -843,7 +844,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation)
scp->segsize_loc = dst_path_metric(__sk_dst_get(sk), RTAX_ADVMSS);
dn_send_conn_conf(sk, allocation);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CC)
@@ -861,13 +862,13 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
if (err == 0) {
- sk->socket->state = SS_CONNECTED;
+ sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CC) {
- sk->socket->state = SS_UNCONNECTED;
+ sk->sk_socket->state = SS_UNCONNECTED;
}
return err;
}
@@ -884,7 +885,7 @@ static int dn_wait_run(struct sock *sk, long *timeo)
if (!*timeo)
return -EALREADY;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
if (scp->state == DN_CI || scp->state == DN_CC)
@@ -902,21 +903,21 @@ static int dn_wait_run(struct sock *sk, long *timeo)
err = -ETIMEDOUT;
if (!*timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
out:
if (err == 0) {
- sk->socket->state = SS_CONNECTED;
+ sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CI && scp->state != DN_CC) {
- sk->socket->state = SS_UNCONNECTED;
+ sk->sk_socket->state = SS_UNCONNECTED;
}
return err;
}
static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
- struct socket *sock = sk->socket;
+ struct socket *sock = sk->sk_socket;
struct dn_scp *scp = DN_SK(sk);
int err = -EISCONN;
struct flowi fl;
@@ -949,8 +950,8 @@ static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen,
if (addr->sdn_flags & SDF_WILD)
goto out;
- if (sk->zapped) {
- err = dn_auto_bind(sk->socket);
+ if (sk->sk_zapped) {
+ err = dn_auto_bind(sk->sk_socket);
if (err)
goto out;
}
@@ -959,17 +960,17 @@ static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen,
err = -EHOSTUNREACH;
memset(&fl, 0, sizeof(fl));
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fld_dst = dn_saddr2dn(&scp->peer);
fl.fld_src = dn_saddr2dn(&scp->addr);
dn_sk_ports_copy(&fl, scp);
fl.proto = DNPROTO_NSP;
- if (dn_route_output_sock(&sk->dst_cache, &fl, sk, flags) < 0)
+ if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
goto out;
- sk->route_caps = sk->dst_cache->dev->features;
+ sk->sk_route_caps = sk->sk_dst_cache->dev->features;
sock->state = SS_CONNECTING;
scp->state = DN_CI;
- scp->segsize_loc = dst_path_metric(sk->dst_cache, RTAX_ADVMSS);
+ scp->segsize_loc = dst_path_metric(sk->sk_dst_cache, RTAX_ADVMSS);
dn_nsp_send_conninit(sk, NSP_CI);
err = -EINPROGRESS;
@@ -1002,7 +1003,7 @@ static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int
case DN_RUN:
return 0;
case DN_CR:
- return dn_confirm_accept(sk, timeo, sk->allocation);
+ return dn_confirm_accept(sk, timeo, sk->sk_allocation);
case DN_CI:
case DN_CC:
return dn_wait_run(sk, timeo);
@@ -1050,19 +1051,19 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
struct sk_buff *skb = NULL;
int err = 0;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
release_sock(sk);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
*timeo = schedule_timeout(*timeo);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
}
lock_sock(sk);
if (skb != NULL)
break;
err = -EINVAL;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
break;
err = sock_intr_errno(*timeo);
if (signal_pending(current))
@@ -1070,9 +1071,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
err = -EAGAIN;
if (!*timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return skb == NULL ? ERR_PTR(err) : skb;
}
@@ -1089,12 +1090,12 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk);
- if (sk->state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
+ if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
release_sock(sk);
return -EINVAL;
}
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
skb = dn_wait_for_connect(sk, &timeo);
if (IS_ERR(skb)) {
@@ -1104,8 +1105,8 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
}
cb = DN_SKB_CB(skb);
- sk->ack_backlog--;
- newsk = dn_alloc_sock(newsock, sk->allocation);
+ sk->sk_ack_backlog--;
+ newsk = dn_alloc_sock(newsock, sk->sk_allocation);
if (newsk == NULL) {
release_sock(sk);
kfree_skb(skb);
@@ -1113,7 +1114,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
}
release_sock(sk);
- dst_release(xchg(&newsk->dst_cache, skb->dst));
+ dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
skb->dst = NULL;
DN_SK(newsk)->state = DN_CR;
@@ -1129,7 +1130,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
- newsk->state = TCP_LISTEN;
+ newsk->sk_state = TCP_LISTEN;
memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
/*
@@ -1168,15 +1169,16 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(newsk);
err = dn_hash_sock(newsk);
if (err == 0) {
- newsk->zapped = 0;
+ newsk->sk_zapped = 0;
dn_send_conn_ack(newsk);
/*
- * Here we use sk->allocation since although the conn conf is
+ * Here we use sk->sk_allocation since although the conn conf is
* for the newsk, the context is the old socket.
*/
if (DN_SK(newsk)->accept_mode == ACC_IMMED)
- err = dn_confirm_accept(newsk, &timeo, sk->allocation);
+ err = dn_confirm_accept(newsk, &timeo,
+ sk->sk_allocation);
}
release_sock(newsk);
return err;
@@ -1246,7 +1248,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return val;
case TIOCOUTQ:
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
err = put_user(amount, (int *)arg);
@@ -1257,9 +1259,10 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if ((skb = skb_peek(&scp->other_receive_queue)) != NULL) {
amount = skb->len;
} else {
- struct sk_buff *skb = sk->receive_queue.next;
+ struct sk_buff *skb = sk->sk_receive_queue.next;
for(;;) {
- if (skb == (struct sk_buff *)&sk->receive_queue)
+ if (skb ==
+ (struct sk_buff *)&sk->sk_receive_queue)
break;
amount += skb->len;
skb = skb->next;
@@ -1284,15 +1287,15 @@ static int dn_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
- if ((DN_SK(sk)->state != DN_O) || (sk->state == TCP_LISTEN))
+ if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
goto out;
- sk->max_ack_backlog = backlog;
- sk->ack_backlog = 0;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = TCP_LISTEN;
err = 0;
dn_rehash_sock(sk);
@@ -1325,7 +1328,7 @@ static int dn_shutdown(struct socket *sock, int how)
if (how != SHUTDOWN_MASK)
goto out;
- sk->shutdown = how;
+ sk->sk_shutdown = how;
dn_destroy_sock(sk);
err = 0;
@@ -1438,7 +1441,7 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char *opt
if (scp->state != DN_CR)
return -EINVAL;
timeo = sock_rcvtimeo(sk, 0);
- err = dn_confirm_accept(sk, &timeo, sk->allocation);
+ err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
return err;
case DSO_CONREJECT:
@@ -1447,8 +1450,8 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char *opt
return -EINVAL;
scp->state = DN_DR;
- sk->shutdown = SHUTDOWN_MASK;
- dn_nsp_send_disc(sk, 0x38, 0, sk->allocation);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
break;
default:
@@ -1662,7 +1665,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
if (cb->nsp_flags & 0x40) {
/* SOCK_SEQPACKET reads to EOM */
- if (sk->type == SOCK_SEQPACKET)
+ if (sk->sk_type == SOCK_SEQPACKET)
return 1;
/* so does SOCK_STREAM unless WAITALL is specified */
if (!(flags & MSG_WAITALL))
@@ -1685,7 +1688,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct dn_scp *scp = DN_SK(sk);
- struct sk_buff_head *queue = &sk->receive_queue;
+ struct sk_buff_head *queue = &sk->sk_receive_queue;
int target = size > 1 ? 1 : 0;
int copied = 0;
int rv = 0;
@@ -1696,7 +1699,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
- if (sk->zapped) {
+ if (sk->sk_zapped) {
rv = -EADDRNOTAVAIL;
goto out;
}
@@ -1705,7 +1708,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
if (rv)
goto out;
- if (sk->shutdown & RCV_SHUTDOWN) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (!(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
rv = -EPIPE;
@@ -1728,7 +1731,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
* See if there is data ready to read, sleep if there isn't
*/
for(;;) {
- if (sk->err)
+ if (sk->sk_err)
goto out;
if (skb_queue_len(&scp->other_receive_queue)) {
@@ -1800,7 +1803,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
}
if (eor) {
- if (sk->type == SOCK_SEQPACKET)
+ if (sk->sk_type == SOCK_SEQPACKET)
break;
if (!(flags & MSG_WAITALL))
break;
@@ -1816,12 +1819,12 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
rv = copied;
- if (eor && (sk->type == SOCK_SEQPACKET))
+ if (eor && (sk->sk_type == SOCK_SEQPACKET))
msg->msg_flags |= MSG_EOR;
out:
if (rv == 0)
- rv = (flags & MSG_PEEK) ? -sk->err : sock_error(sk);
+ rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
if ((rv >= 0) && msg->msg_name) {
memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
@@ -1950,13 +1953,13 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
if (err)
goto out_err;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
err = -EPIPE;
goto out_err;
}
- if ((flags & MSG_TRYHARD) && sk->dst_cache)
- dst_negative_advice(&sk->dst_cache);
+ if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
+ dst_negative_advice(&sk->sk_dst_cache);
mss = scp->segsize_rem;
fctype = scp->services_rem & NSP_FC_MASK;
@@ -2053,7 +2056,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
}
sent += len;
- dn_nsp_queue_xmit(sk, skb, sk->allocation, flags & MSG_OOB);
+ dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
skb = NULL;
scp->persist = dn_nsp_persist(sk);
@@ -2132,7 +2135,7 @@ static struct sock *dn_socket_get_next(struct seq_file *seq,
{
struct dn_iter_state *state = seq->private;
- n = n->next;
+ n = n->sk_next;
try_again:
if (n)
goto out;
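The dn_unhash_sock() hunks earlier in this file walk the hash chain with a pointer-to-pointer until they find the socket, then splice it out. A minimal sketch of that unlink idiom (toy node type; like the kernel code, it assumes the node is on the list, with the back-pointer doubling as the hashed/unhashed flag):

struct node { struct node *next; };

/* Unlink n from the list whose head pointer is *head. Walks exactly
 * like the while (*skp != sk) loop above; n must be on the list. */
static void unlink(struct node **head, struct node *n)
{
	struct node **pp = head;

	while (*pp != n)
		pp = &(*pp)->next;
	*pp = n->next;
	n->next = NULL;
}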
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 55775e928405..d96e596c5c6b 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -120,7 +120,7 @@ static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
}
if (wakeup && !sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
/*
@@ -324,14 +324,14 @@ err_out:
static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
{
- if (sk->ack_backlog >= sk->max_ack_backlog) {
+ if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
kfree_skb(skb);
return;
}
- sk->ack_backlog++;
- skb_queue_tail(&sk->receive_queue, skb);
- sk->state_change(sk);
+ sk->sk_ack_backlog++;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_state_change(sk);
}
static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
@@ -351,7 +351,7 @@ static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
if ((scp->state == DN_CI) || (scp->state == DN_CD)) {
scp->persist = 0;
scp->addrrem = cb->src_port;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
scp->state = DN_RUN;
scp->services_rem = cb->services;
scp->info_rem = cb->info;
@@ -369,7 +369,7 @@ static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
}
dn_nsp_send_link(sk, DN_NOCHANGE, 0);
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
out:
@@ -413,7 +413,7 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
}
scp->addrrem = cb->src_port;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
switch(scp->state) {
case DN_CI:
@@ -421,7 +421,7 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
scp->state = DN_RJ;
break;
case DN_RUN:
- sk->shutdown |= SHUTDOWN_MASK;
+ sk->sk_shutdown |= SHUTDOWN_MASK;
scp->state = DN_DN;
break;
case DN_DI:
@@ -430,9 +430,9 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
}
if (!sock_flag(sk, SOCK_DEAD)) {
- if (sk->socket->state != SS_UNCONNECTED)
- sk->socket->state = SS_DISCONNECTING;
- sk->state_change(sk);
+ if (sk->sk_socket->state != SS_UNCONNECTED)
+ sk->sk_socket->state = SS_DISCONNECTING;
+ sk->sk_state_change(sk);
}
/*
@@ -465,7 +465,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
reason = dn_ntohs(*(__u16 *)skb->data);
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
switch(scp->state) {
case DN_CI:
@@ -481,15 +481,15 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
scp->state = DN_DIC;
break;
case DN_RUN:
- sk->shutdown |= SHUTDOWN_MASK;
+ sk->sk_shutdown |= SHUTDOWN_MASK;
case DN_CC:
scp->state = DN_CN;
}
if (!sock_flag(sk, SOCK_DEAD)) {
- if (sk->socket->state != SS_UNCONNECTED)
- sk->socket->state = SS_DISCONNECTING;
- sk->state_change(sk);
+ if (sk->sk_socket->state != SS_UNCONNECTED)
+ sk->sk_socket->state = SS_DISCONNECTING;
+ sk->sk_state_change(sk);
}
scp->persist_fxn = dn_destroy_timer;
@@ -559,7 +559,7 @@ static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
break;
}
if (wake_up && !sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
dn_nsp_send_oth_ack(sk);
@@ -580,7 +580,8 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
err = -ENOMEM;
goto out;
}
@@ -595,16 +596,16 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
/* This code only runs from BH or BH protected context.
* Therefore the plain read_lock is ok here. -DaveM
*/
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
if (!sock_flag(sk, SOCK_DEAD)) {
- struct socket *sock = sk->socket;
- wake_up_interruptible(sk->sleep);
+ struct socket *sock = sk->sk_socket;
+ wake_up_interruptible(sk->sk_sleep);
if (sock && sock->fasync_list &&
!test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
__kill_fasync(sock->fasync_list, sig,
(sig == SIGURG) ? POLL_PRI : POLL_IN);
}
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
out:
return err;
}
@@ -651,7 +652,7 @@ static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
skb_pull(skb, 2);
if (seq_next(scp->numdat_rcv, segnum)) {
- if (dn_queue_skb(sk, skb, SIGIO, &sk->receive_queue) == 0) {
+ if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
seq_add(&scp->numdat_rcv, 1);
queued = 1;
}
@@ -679,9 +680,9 @@ static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
if (scp->state == DN_CI) {
scp->state = DN_NC;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
kfree_skb(skb);
@@ -884,8 +885,8 @@ int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
/* both data and ack frames can kick a CC socket into RUN */
if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) {
scp->state = DN_RUN;
- sk->state = TCP_ESTABLISHED;
- sk->state_change(sk);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_state_change(sk);
}
if ((cb->nsp_flags & 0x1c) == 0)
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 40aa05ac122b..172dfd55f252 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -92,20 +92,20 @@ try_again:
}
memset(&fl, 0, sizeof(fl));
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fld_src = dn_saddr2dn(&scp->addr);
fl.fld_dst = dn_saddr2dn(&scp->peer);
dn_sk_ports_copy(&fl, scp);
fl.proto = DNPROTO_NSP;
- if (dn_route_output_sock(&sk->dst_cache, &fl, sk, 0) == 0) {
+ if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, 0) == 0) {
dst = sk_dst_get(sk);
- sk->route_caps = dst->dev->features;
+ sk->sk_route_caps = dst->dev->features;
goto try_again;
}
- sk->err = EHOSTUNREACH;
+ sk->sk_err = EHOSTUNREACH;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
@@ -155,40 +155,42 @@ struct sk_buff *dn_alloc_send_skb(struct sock *sk, int *size, int noblock, int *
break;
}
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
*err = EINVAL;
break;
}
- if (sk->err)
+ if (sk->sk_err)
break;
len = *size + 11;
- space = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ space = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (space < len) {
- if ((sk->socket->type == SOCK_STREAM) && (space >= (16 + 11)))
+ if ((sk->sk_socket->type == SOCK_STREAM) &&
+ (space >= (16 + 11)))
len = space;
}
if (space < len) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
if (noblock) {
*err = EWOULDBLOCK;
break;
}
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
SOCK_SLEEP_PRE(sk)
- if ((sk->sndbuf - atomic_read(&sk->wmem_alloc)) < len)
+ if ((sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc)) <
+ len)
schedule();
SOCK_SLEEP_POST(sk)
continue;
}
- if ((skb = dn_alloc_skb(sk, len, sk->allocation)) == NULL)
+ if ((skb = dn_alloc_skb(sk, len, sk->sk_allocation)) == NULL)
continue;
*size = len - 11;
@@ -546,7 +548,7 @@ void dn_send_conn_ack (struct sock *sk)
struct sk_buff *skb = NULL;
struct nsp_conn_ack_msg *msg;
- if ((skb = dn_alloc_skb(sk, 3, sk->allocation)) == NULL)
+ if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
return;
msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3);
@@ -662,7 +664,7 @@ void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
if (reason == 0)
reason = scp->discdata_out.opt_status;
- dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->dst_cache, ddl,
+ dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl,
scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
}
@@ -714,14 +716,15 @@ static int dn_nsp_retrans_conninit(struct sock *sk)
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
struct dn_scp *scp = DN_SK(sk);
- struct sk_buff *skb = NULL;
struct nsp_conn_init_msg *msg;
unsigned char aux;
unsigned char menuver;
struct dn_skb_cb *cb;
unsigned char type = 1;
+ int allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
+ struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);
- if ((skb = dn_alloc_skb(sk, 200, (msgflg == NSP_CI) ? sk->allocation : GFP_ATOMIC)) == NULL)
+ if (!skb)
return;
cb = DN_SKB_CB(skb);
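dn_alloc_send_skb, touched above, is the classic sender-side backpressure loop: compute free space as sndbuf minus bytes already committed to the wire, shrink the request for stream sockets rather than blocking, and sleep until the write allocation drains. A self-contained sketch of the space computation, with hypothetical names and an atomic counter standing in for sk_wmem_alloc:

#include <stdatomic.h>
#include <stdio.h>

struct sock {
        int sk_sndbuf;                 /* send buffer limit, bytes */
        atomic_int sk_wmem_alloc;      /* bytes currently queued for transmit */
};

/* Mirrors the hunk: len = payload + 11 bytes of NSP header; a stream
 * socket may accept a shorter segment instead of blocking. */
static int usable_len(struct sock *sk, int size, int is_stream)
{
        int len = size + 11;
        int space = sk->sk_sndbuf - atomic_load(&sk->sk_wmem_alloc);

        if (space < len && is_stream && space >= 16 + 11)
                len = space;            /* send what fits */
        return space < len ? -1 : len;  /* -1: caller waits or EWOULDBLOCK */
}

int main(void)
{
        struct sock sk = { .sk_sndbuf = 4096 };

        atomic_store(&sk.sk_wmem_alloc, 4000);
        printf("%d\n", usable_len(&sk, 1000, 1)); /* 96: truncated to space */
        return 0;
}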
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 1fe6226a8b86..0aae8a633907 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -38,16 +38,16 @@ static void dn_slow_timer(unsigned long arg);
void dn_start_slow_timer(struct sock *sk)
{
- sk->timer.expires = jiffies + SLOW_INTERVAL;
- sk->timer.function = dn_slow_timer;
- sk->timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
+ sk->sk_timer.function = dn_slow_timer;
+ sk->sk_timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void dn_stop_slow_timer(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
static void dn_slow_timer(unsigned long arg)
@@ -59,8 +59,8 @@ static void dn_slow_timer(unsigned long arg)
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk->timer.expires = jiffies + HZ / 10;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + HZ / 10;
+ add_timer(&sk->sk_timer);
goto out;
}
@@ -102,9 +102,9 @@ static void dn_slow_timer(unsigned long arg)
scp->keepalive_fxn(sk);
}
- sk->timer.expires = jiffies + SLOW_INTERVAL;
+ sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
out:
bh_unlock_sock(sk);
sock_put(sk);
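dn_slow_timer shows the standard pre-hrtimer idiom that the rename touches: a timer_list carries an expires jiffy, a handler, and an opaque data word, and the handler re-arms itself, backing off to HZ/10 when the socket is locked by a user context. A sketch of that rearm logic with a toy add_timer (names and values illustrative, not the kernel's):

#include <stdio.h>

#define HZ            100
#define SLOW_INTERVAL (HZ / 2)

struct timer_list {
        unsigned long expires;
        void (*function)(unsigned long);
        unsigned long data;
};

static unsigned long jiffies;

static void add_timer(struct timer_list *t)
{
        printf("armed for jiffy %lu\n", t->expires); /* toy stand-in */
}

struct sock {
        struct timer_list sk_timer;
        int owned_by_user;   /* models sock_owned_by_user(sk) */
};

static void dn_slow_timer(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        if (sk->owned_by_user) {
                /* Socket busy: retry soon rather than doing the work now. */
                sk->sk_timer.expires = jiffies + HZ / 10;
                add_timer(&sk->sk_timer);
                return;
        }
        /* ... per-socket housekeeping would run here ... */
        sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
        add_timer(&sk->sk_timer);
}

int main(void)
{
        struct sock sk = { .owned_by_user = 1 };

        sk.sk_timer.function = dn_slow_timer;
        sk.sk_timer.data = (unsigned long)&sk;
        sk.sk_timer.function(sk.sk_timer.data);
        return 0;
}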
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 8b2f9359c5f7..f86a6259fd12 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -120,7 +120,7 @@ static void dnrmg_receive_user_sk(struct sock *sk, int len)
{
struct sk_buff *skb;
- while((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
dnrmg_receive_user_skb(skb);
kfree_skb(skb);
}
@@ -145,7 +145,7 @@ static int __init init(void)
rv = nf_register_hook(&dnrmg_ops);
if (rv) {
- sock_release(dnrmg->socket);
+ sock_release(dnrmg->sk_socket);
}
return rv;
@@ -154,7 +154,7 @@ static int __init init(void)
static void __exit fini(void)
{
nf_unregister_hook(&dnrmg_ops);
- sock_release(dnrmg->socket);
+ sock_release(dnrmg->sk_socket);
}
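dnrmg_receive_user_sk above is the usual kernel-side netlink receive loop: pop everything off sk_receive_queue, hand each buffer to a handler, free it. Sketched here over a trivial singly linked queue with hypothetical types:

#include <stdlib.h>
#include <stdio.h>

struct sk_buff { struct sk_buff *next; int len; };
struct sk_buff_head { struct sk_buff *first; };

static struct sk_buff *skb_dequeue(struct sk_buff_head *q)
{
        struct sk_buff *skb = q->first;

        if (skb)
                q->first = skb->next;
        return skb;
}

static void handle(struct sk_buff *skb)
{
        printf("got %d bytes\n", skb->len);
}

/* Mirrors dnrmg_receive_user_sk: drain, process, free. */
static void receive_user_sk(struct sk_buff_head *rxq)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(rxq)) != NULL) {
                handle(skb);
                free(skb);          /* kfree_skb() in the kernel */
        }
}

int main(void)
{
        struct sk_buff *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
        struct sk_buff_head q = { a };

        if (!a || !b)
                return 1;
        a->next = b;    a->len = 16;
        b->next = NULL; b->len = 32;
        receive_user_sk(&q);
        return 0;
}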
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 1e2e6faa9262..d633d6f7be33 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -101,10 +101,10 @@ static void econet_remove_socket(struct sock **list, struct sock *sk)
while ((s = *list) != NULL) {
if (s == sk) {
- *list = s->next;
+ *list = s->sk_next;
break;
}
- list = &s->next;
+ list = &s->sk_next;
}
write_unlock_bh(&econet_lock);
@@ -115,7 +115,7 @@ static void econet_remove_socket(struct sock **list, struct sock *sk)
static void econet_insert_socket(struct sock **list, struct sock *sk)
{
write_lock_bh(&econet_lock);
- sk->next = *list;
+ sk->sk_next = *list;
sock_hold(sk);
write_unlock_bh(&econet_lock);
}
@@ -170,7 +170,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
if (err)
goto out_free;
- sk->stamp=skb->stamp;
+ sk->sk_stamp = skb->stamp;
if (msg->msg_name)
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
@@ -364,7 +364,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
if (err)
goto out_free;
@@ -500,13 +500,14 @@ static void econet_destroy_timer(unsigned long data)
{
struct sock *sk=(struct sock *)data;
- if (!atomic_read(&sk->wmem_alloc) && !atomic_read(&sk->rmem_alloc)) {
+ if (!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc)) {
sk_free(sk);
return;
}
- sk->timer.expires=jiffies+10*HZ;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ add_timer(&sk->sk_timer);
printk(KERN_DEBUG "econet socket destroy delayed\n");
}
@@ -527,21 +528,22 @@ static int econet_release(struct socket *sock)
* Now the socket is dead. No more input will appear.
*/
- sk->state_change(sk); /* It is useless. Just for sanity. */
+ sk->sk_state_change(sk); /* It is useless. Just for sanity. */
sock->sk = NULL;
- sk->socket = NULL;
+ sk->sk_socket = NULL;
sock_set_flag(sk, SOCK_DEAD);
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
- if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
- sk->timer.data=(unsigned long)sk;
- sk->timer.expires=jiffies+HZ;
- sk->timer.function=econet_destroy_timer;
- add_timer(&sk->timer);
+ if (atomic_read(&sk->sk_rmem_alloc) ||
+ atomic_read(&sk->sk_wmem_alloc)) {
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = econet_destroy_timer;
+ add_timer(&sk->sk_timer);
return 0;
}
@@ -570,7 +572,7 @@ static int econet_create(struct socket *sock, int protocol)
if (sk == NULL)
goto out;
- sk->reuse = 1;
+ sk->sk_reuse = 1;
sock->ops = &econet_ops;
sock_init_data(sock,sk);
@@ -578,8 +580,8 @@ static int econet_create(struct socket *sock, int protocol)
if (!eo)
goto out_free;
memset(eo, 0, sizeof(*eo));
- sk->zapped=0;
- sk->family = PF_ECONET;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_ECONET;
eo->num = protocol;
econet_insert_socket(&econet_sklist, sk);
@@ -671,9 +673,10 @@ static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg
switch(cmd) {
case SIOCGSTAMP:
- if(sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- return copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)) ? -EFAULT : 0;
case SIOCSIFADDR:
case SIOCGIFADDR:
return ec_dev_ioctl(sock, cmd, (void *)arg);
@@ -733,7 +736,7 @@ static struct sock *ec_listening_socket(unsigned char port, unsigned char
(opt->net == net || opt->net == 0))
return sk;
- sk = sk->next;
+ sk = sk->sk_next;
}
return NULL;
@@ -990,9 +993,9 @@ static int __init aun_udp_initialise(void)
return error;
}
- udpsock->sk->reuse = 1;
- udpsock->sk->allocation = GFP_ATOMIC; /* we're going to call it
- from interrupts */
+ udpsock->sk->sk_reuse = 1;
+ udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
+ from interrupts */
error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
sizeof(sin));
@@ -1002,7 +1005,7 @@ static int __init aun_udp_initialise(void)
goto release;
}
- udpsock->sk->data_ready = aun_data_available;
+ udpsock->sk->sk_data_ready = aun_data_available;
return 0;
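Econet still keeps its sockets on a hand-rolled singly linked list, which is why the rename has to touch raw next pointers here. The remove loop walks a pointer-to-pointer, so unlinking is a single assignment with no head special case; a compact runnable sketch (locking omitted):

#include <stdio.h>

struct sock { struct sock *sk_next; int id; };

static void insert_socket(struct sock **list, struct sock *sk)
{
        sk->sk_next = *list;
        *list = sk;
}

static void remove_socket(struct sock **list, struct sock *sk)
{
        struct sock *s;

        /* *list is either the head pointer or some node's sk_next,
         * so the head needs no special-casing. */
        while ((s = *list) != NULL) {
                if (s == sk) {
                        *list = s->sk_next;
                        break;
                }
                list = &s->sk_next;
        }
}

int main(void)
{
        struct sock a = { .id = 1 }, b = { .id = 2 }, *head = NULL;

        insert_socket(&head, &a);
        insert_socket(&head, &b);
        remove_socket(&head, &b);
        printf("%d\n", head->id);   /* 1 */
        return 0;
}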
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 343239eb91b2..214523c899d5 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -137,13 +137,12 @@ void inet_sock_destruct(struct sock *sk)
{
struct inet_opt *inet = inet_sk(sk);
- __skb_queue_purge(&sk->receive_queue);
- __skb_queue_purge(&sk->error_queue);
+ __skb_queue_purge(&sk->sk_receive_queue);
+ __skb_queue_purge(&sk->sk_error_queue);
- if (sk->type == SOCK_STREAM && sk->state != TCP_CLOSE) {
+ if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
printk("Attempt to release TCP socket in state %d %p\n",
- sk->state,
- sk);
+ sk->sk_state, sk);
return;
}
if (!sock_flag(sk, SOCK_DEAD)) {
@@ -151,14 +150,14 @@ void inet_sock_destruct(struct sock *sk)
return;
}
- BUG_TRAP(!atomic_read(&sk->rmem_alloc));
- BUG_TRAP(!atomic_read(&sk->wmem_alloc));
- BUG_TRAP(!sk->wmem_queued);
- BUG_TRAP(!sk->forward_alloc);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+ BUG_TRAP(!sk->sk_wmem_queued);
+ BUG_TRAP(!sk->sk_forward_alloc);
if (inet->opt)
kfree(inet->opt);
- dst_release(sk->dst_cache);
+ dst_release(sk->sk_dst_cache);
#ifdef INET_REFCNT_DEBUG
atomic_dec(&inet_sock_nr);
printk(KERN_DEBUG "INET socket %p released, %d are still alive\n",
@@ -168,8 +167,8 @@ void inet_sock_destruct(struct sock *sk)
void inet_sock_release(struct sock *sk)
{
- if (sk->prot->destroy)
- sk->prot->destroy(sk);
+ if (sk->sk_prot->destroy)
+ sk->sk_prot->destroy(sk);
/* Observation: when inet_sock_release is called, processes have
* no access to the socket, but the network stack still does.
@@ -178,7 +177,7 @@ void inet_sock_release(struct sock *sk)
* A. Remove from hash tables.
*/
- sk->prot->unhash(sk);
+ sk->sk_prot->unhash(sk);
/* In this point socket cannot receive new packets,
* but it is possible that some packets are in flight
@@ -198,9 +197,9 @@ void inet_sock_release(struct sock *sk)
xfrm_sk_free_policy(sk);
#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&sk->refcnt) != 1)
+ if (atomic_read(&sk->sk_refcnt) != 1)
printk(KERN_DEBUG "Destruction inet %p delayed, c=%d\n",
- sk, atomic_read(&sk->refcnt));
+ sk, atomic_read(&sk->sk_refcnt));
#endif
sock_put(sk);
}
@@ -220,7 +219,7 @@ int inet_setsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
- return sk->prot->setsockopt(sk, level, optname, optval, optlen);
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
/*
@@ -236,7 +235,7 @@ int inet_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
- return sk->prot->getsockopt(sk, level, optname, optval, optlen);
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
/*
@@ -250,7 +249,7 @@ static int inet_autobind(struct sock *sk)
lock_sock(sk);
inet = inet_sk(sk);
if (!inet->num) {
- if (sk->prot->get_port(sk, 0)) {
+ if (sk->sk_prot->get_port(sk, 0)) {
release_sock(sk);
return -EAGAIN;
}
@@ -275,7 +274,7 @@ int inet_listen(struct socket *sock, int backlog)
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
goto out;
- old_state = sk->state;
+ old_state = sk->sk_state;
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
@@ -287,7 +286,7 @@ int inet_listen(struct socket *sock, int backlog)
if (err)
goto out;
}
- sk->max_ack_backlog = backlog;
+ sk->sk_max_ack_backlog = backlog;
err = 0;
out:
@@ -368,10 +367,10 @@ static int inet_create(struct socket *sock, int protocol)
goto out_sk_free;
err = 0;
sock->ops = answer->ops;
- sk->prot = answer->prot;
- sk->no_check = answer->no_check;
+ sk->sk_prot = answer->prot;
+ sk->sk_no_check = answer->no_check;
if (INET_PROTOSW_REUSE & answer->flags)
- sk->reuse = 1;
+ sk->sk_reuse = 1;
rcu_read_unlock();
inet = inet_sk(sk);
@@ -392,11 +391,11 @@ static int inet_create(struct socket *sock, int protocol)
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->destruct = inet_sock_destruct;
- sk->zapped = 0;
- sk->family = PF_INET;
- sk->protocol = protocol;
- sk->backlog_rcv = sk->prot->backlog_rcv;
+ sk->sk_destruct = inet_sock_destruct;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_INET;
+ sk->sk_protocol = protocol;
+ sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
inet->uc_ttl = -1;
inet->mc_loop = 1;
@@ -416,11 +415,11 @@ static int inet_create(struct socket *sock, int protocol)
*/
inet->sport = htons(inet->num);
/* Add to protocol hash chains. */
- sk->prot->hash(sk);
+ sk->sk_prot->hash(sk);
}
- if (sk->prot->init) {
- err = sk->prot->init(sk);
+ if (sk->sk_prot->init) {
+ err = sk->sk_prot->init(sk);
if (err)
inet_sock_release(sk);
}
@@ -458,9 +457,9 @@ int inet_release(struct socket *sock)
timeout = 0;
if (sock_flag(sk, SOCK_LINGER) &&
!(current->flags & PF_EXITING))
- timeout = sk->lingertime;
+ timeout = sk->sk_lingertime;
sock->sk = NULL;
- sk->prot->close(sk, timeout);
+ sk->sk_prot->close(sk, timeout);
}
return 0;
}
@@ -478,8 +477,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
int err;
/* If the socket has its own bind function then use it. (RAW) */
- if (sk->prot->bind) {
- err = sk->prot->bind(sk, uaddr, addr_len);
+ if (sk->sk_prot->bind) {
+ err = sk->sk_prot->bind(sk, uaddr, addr_len);
goto out;
}
err = -EINVAL;
@@ -520,7 +519,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Check these errors (active socket, double bind). */
err = -EINVAL;
- if (sk->state != TCP_CLOSE || inet->num)
+ if (sk->sk_state != TCP_CLOSE || inet->num)
goto out_release_sock;
inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
@@ -528,16 +527,16 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
inet->saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
- if (sk->prot->get_port(sk, snum)) {
+ if (sk->sk_prot->get_port(sk, snum)) {
inet->saddr = inet->rcv_saddr = 0;
err = -EADDRINUSE;
goto out_release_sock;
}
if (inet->rcv_saddr)
- sk->userlocks |= SOCK_BINDADDR_LOCK;
+ sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
- sk->userlocks |= SOCK_BINDPORT_LOCK;
+ sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->sport = htons(inet->num);
inet->daddr = 0;
inet->dport = 0;
@@ -555,33 +554,33 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
struct sock *sk = sock->sk;
if (uaddr->sa_family == AF_UNSPEC)
- return sk->prot->disconnect(sk, flags);
+ return sk->sk_prot->disconnect(sk, flags);
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
- return sk->prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
+ return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
}
static long inet_wait_for_connect(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- /* Basic assumption: if someone sets sk->err, he _must_
+ /* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
* Connect() does not allow to get error notifications
* without closing the socket.
*/
- while ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return timeo;
}
@@ -599,7 +598,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
lock_sock(sk);
if (uaddr->sa_family == AF_UNSPEC) {
- err = sk->prot->disconnect(sk, flags);
+ err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
}
@@ -617,10 +616,10 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
break;
case SS_UNCONNECTED:
err = -EISCONN;
- if (sk->state != TCP_CLOSE)
+ if (sk->sk_state != TCP_CLOSE)
goto out;
- err = sk->prot->connect(sk, uaddr, addr_len);
+ err = sk->sk_prot->connect(sk, uaddr, addr_len);
if (err < 0)
goto out;
@@ -636,7 +635,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
- if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo))
goto out;
@@ -649,10 +648,10 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
/* Connection was closed by RST, timeout, ICMP error
* or another process disconnected us.
*/
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
goto sock_error;
- /* sk->err may be not zero now, if RECVERR was ordered by user
+ /* sk->sk_err may be non-zero now, if RECVERR was ordered by user
* and an error was received after the socket entered the established state.
* Hence, it is handled normally after connect() returns successfully.
*/
@@ -666,7 +665,7 @@ out:
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
- if (sk->prot->disconnect(sk, flags))
+ if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
}
@@ -679,14 +678,14 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk1 = sock->sk;
int err = -EINVAL;
- struct sock *sk2 = sk1->prot->accept(sk1, flags, &err);
+ struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
if (!sk2)
goto do_err;
lock_sock(sk2);
- BUG_TRAP((1 << sk2->state) &
+ BUG_TRAP((1 << sk2->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
sock_graft(sk2, newsock);
@@ -712,7 +711,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
sin->sin_family = AF_INET;
if (peer) {
if (!inet->dport ||
- (((1 << sk->state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
+ (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
peer == 1))
return -ENOTCONN;
sin->sin_port = inet->dport;
@@ -737,8 +736,8 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
int addr_len = 0;
int err;
- err = sk->prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
- flags & ~MSG_DONTWAIT, &addr_len);
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
@@ -754,7 +753,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
- return sk->prot->sendmsg(iocb, sk, msg, size);
+ return sk->sk_prot->sendmsg(iocb, sk, msg, size);
}
@@ -766,8 +765,8 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t
if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
- if (sk->prot->sendpage)
- return sk->prot->sendpage(sk, page, offset, size, flags);
+ if (sk->sk_prot->sendpage)
+ return sk->sk_prot->sendpage(sk, page, offset, size, flags);
return sock_no_sendpage(sock, page, offset, size, flags);
}
@@ -788,22 +787,22 @@ int inet_shutdown(struct socket *sock, int how)
lock_sock(sk);
if (sock->state == SS_CONNECTING) {
- if ((1 << sk->state) &
+ if ((1 << sk->sk_state) &
(TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
sock->state = SS_DISCONNECTING;
else
sock->state = SS_CONNECTED;
}
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_CLOSE:
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
POLLHUP, even on eg. unconnected UDP sockets -- RR */
default:
- sk->shutdown |= how;
- if (sk->prot->shutdown)
- sk->prot->shutdown(sk, how);
+ sk->sk_shutdown |= how;
+ if (sk->sk_prot->shutdown)
+ sk->sk_prot->shutdown(sk, how);
break;
/* Remaining two branches are temporary solution for missing
@@ -815,13 +814,13 @@ int inet_shutdown(struct socket *sock, int how)
break;
/* Fall through */
case TCP_SYN_SENT:
- err = sk->prot->disconnect(sk, O_NONBLOCK);
+ err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
break;
}
/* Wake up anyone sleeping in poll. */
- sk->state_change(sk);
+ sk->sk_state_change(sk);
release_sock(sk);
return err;
}
@@ -843,9 +842,9 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case SIOCGSTAMP:
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
err = -ENOENT;
- else if (copy_to_user((void *)arg, &sk->stamp,
+ else if (copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
err = -EFAULT;
break;
@@ -873,8 +872,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
err = devinet_ioctl(cmd, (void *)arg);
break;
default:
- if (!sk->prot->ioctl ||
- (err = sk->prot->ioctl(sk, cmd, arg)) ==
+ if (!sk->sk_prot->ioctl ||
+ (err = sk->sk_prot->ioctl(sk, cmd, arg)) ==
-ENOIOCTLCMD)
err = dev_ioctl(cmd, (void *)arg);
break;
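Most of the af_inet churn is mechanical because the AF_INET layer rarely implements behaviour itself; it forwards through the per-protocol ops table, now reached as sk->sk_prot. A sketch of that dispatch shape, with a hypothetical trimmed proto table (the real struct proto carries many more hooks):

#include <stdio.h>

struct sock;

struct proto {
        const char *name;
        int (*setsockopt)(struct sock *sk, int level, int optname,
                          const void *optval, int optlen);
        void (*close)(struct sock *sk, long timeout);
};

struct sock { struct proto *sk_prot; };

/* inet_setsockopt in the patch is exactly this one-liner. */
static int inet_setsockopt(struct sock *sk, int level, int optname,
                           const void *optval, int optlen)
{
        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

static int tcp_setsockopt(struct sock *sk, int level, int optname,
                          const void *optval, int optlen)
{
        (void)sk; (void)optval; (void)optlen;
        printf("tcp_setsockopt(level=%d, opt=%d)\n", level, optname);
        return 0;
}

static void tcp_close(struct sock *sk, long timeout)
{
        (void)sk; (void)timeout;
}

static struct proto tcp_prot = { "TCP", tcp_setsockopt, tcp_close };

int main(void)
{
        struct sock sk = { &tcp_prot };

        return inet_setsockopt(&sk, 6, 1, NULL, 0);
}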
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 9958e71d47d5..9ca1561ea5f0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -234,13 +234,13 @@ static __inline__ void icmp_xmit_lock(void)
{
local_bh_disable();
- if (unlikely(!spin_trylock(&icmp_socket->sk->lock.slock)))
+ if (unlikely(!spin_trylock(&icmp_socket->sk->sk_lock.slock)))
BUG();
}
static void icmp_xmit_unlock(void)
{
- spin_unlock_bh(&icmp_socket->sk->lock.slock);
+ spin_unlock_bh(&icmp_socket->sk->sk_lock.slock);
}
/*
@@ -344,12 +344,12 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
icmp_param->head_len,
ipc, rt, MSG_DONTWAIT);
- if ((skb = skb_peek(&icmp_socket->sk->write_queue)) != NULL) {
+ if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = skb->h.icmph;
unsigned int csum = 0;
struct sk_buff *skb1;
- skb_queue_walk(&icmp_socket->sk->write_queue, skb1) {
+ skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum);
}
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
@@ -685,7 +685,7 @@ static void icmp_unreach(struct sk_buff *skb)
iph->saddr,
skb->dev->ifindex)) != NULL) {
raw_err(raw_sk, skb, info);
- raw_sk = raw_sk->next;
+ raw_sk = raw_sk->sk_next;
iph = (struct iphdr *)skb->data;
}
}
@@ -1101,8 +1101,8 @@ void __init icmp_init(struct net_proto_family *ops)
if (err < 0)
panic("Failed to create the ICMP control socket.\n");
- per_cpu(__icmp_socket, i)->sk->allocation = GFP_ATOMIC;
- per_cpu(__icmp_socket, i)->sk->sndbuf = SK_WMEM_MAX * 2;
+ per_cpu(__icmp_socket, i)->sk->sk_allocation = GFP_ATOMIC;
+ per_cpu(__icmp_socket, i)->sk->sk_sndbuf = SK_WMEM_MAX * 2;
inet = inet_sk(per_cpu(__icmp_socket, i)->sk);
inet->uc_ttl = -1;
inet->pmtudisc = IP_PMTUDISC_DONT;
@@ -1111,6 +1111,6 @@ void __init icmp_init(struct net_proto_family *ops)
* see it, we do not wish this socket to see incoming
* packets.
*/
- per_cpu(__icmp_socket, i)->sk->prot->unhash(per_cpu(__icmp_socket, i)->sk);
+ per_cpu(__icmp_socket, i)->sk->sk_prot->unhash(per_cpu(__icmp_socket, i)->sk);
}
}
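icmp_push_reply folds per-fragment checksums across everything queued on sk_write_queue before patching the final ICMP header. One's-complement accumulation is order-independent, which is what makes that queue walk legal; a minimal csum_add/csum_fold sketch (values illustrative):

#include <stdint.h>
#include <stdio.h>

/* 32-bit one's-complement accumulate. */
static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
        csum += addend;
        return csum + (csum < addend);   /* wrap the end-around carry */
}

static uint16_t csum_fold(uint32_t csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16);
        return (uint16_t)~csum;
}

int main(void)
{
        /* Partial sums of three queued fragments. */
        uint32_t parts[] = { 0x1a2b, 0x3c4d, 0x0e0f };
        uint32_t csum = 0;

        for (int i = 0; i < 3; i++)          /* skb_queue_walk analogue */
                csum = csum_add(csum, parts[i]);
        printf("folded: 0x%04x\n", csum_fold(csum));
        return 0;
}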
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index fe561aa4a767..5c3e2042873e 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -167,9 +167,9 @@ int ip_call_ra_chain(struct sk_buff *skb)
/* If socket is bound to an interface, only report
* the packet if it came from that interface.
*/
- if (sk && inet_sk(sk)->num == protocol
- && ((sk->bound_dev_if == 0)
- || (sk->bound_dev_if == skb->dev->ifindex))) {
+ if (sk && inet_sk(sk)->num == protocol &&
+ (!sk->sk_bound_dev_if ||
+ sk->sk_bound_dev_if == skb->dev->ifindex)) {
if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
skb = ip_defrag(skb);
if (skb == NULL) {
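The rewritten ip_call_ra_chain test is the canonical sk_bound_dev_if filter: a socket bound to an interface only sees packets that arrived on it, while an unbound socket (ifindex 0) sees everything. As a predicate:

#include <stdio.h>
#include <stdbool.h>

struct sock { int sk_bound_dev_if; };

/* Mirrors the condition in ip_call_ra_chain above. */
static bool dev_match(const struct sock *sk, int ingress_ifindex)
{
        return !sk->sk_bound_dev_if ||
               sk->sk_bound_dev_if == ingress_ifindex;
}

int main(void)
{
        struct sock unbound = { 0 }, bound = { 2 };

        printf("%d %d %d\n",
               dev_match(&unbound, 3),   /* 1: unbound sees all */
               dev_match(&bound, 2),     /* 1: right interface  */
               dev_match(&bound, 3));    /* 0: wrong interface  */
        return 0;
}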
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 37edd627ae8d..765c13b96d64 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->ttl = ip_select_ttl(inet, &rt->u.dst);
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
- iph->protocol = sk->protocol;
+ iph->protocol = sk->sk_protocol;
iph->tot_len = htons(skb->len);
ip_select_ident(iph, &rt->u.dst, sk);
skb->nh.iph = iph;
@@ -159,7 +159,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
}
ip_send_check(iph);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
/* Send it out. */
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
@@ -316,12 +316,12 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
daddr = opt->faddr;
{
- struct flowi fl = { .oif = sk->bound_dev_if,
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = daddr,
.saddr = inet->saddr,
.tos = RT_CONN_FLAGS(sk) } },
- .proto = sk->protocol,
+ .proto = sk->sk_protocol,
.uli_u = { .ports =
{ .sport = inet->sport,
.dport = inet->dport } } };
@@ -351,7 +351,7 @@ packet_routed:
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->u.dst);
- iph->protocol = sk->protocol;
+ iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
skb->nh.iph = iph;
@@ -363,7 +363,7 @@ packet_routed:
}
mtu = dst_pmtu(&rt->u.dst);
- if (skb->len > mtu && (sk->route_caps&NETIF_F_TSO)) {
+ if (skb->len > mtu && (sk->sk_route_caps & NETIF_F_TSO)) {
unsigned int hlen;
/* Hack zone: all this must be done by TCP. */
@@ -379,7 +379,7 @@ packet_routed:
/* Add an IP checksum. */
ip_send_check(iph);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
@@ -739,14 +739,14 @@ int ip_append_data(struct sock *sk,
if (flags&MSG_PROBE)
return 0;
- if (skb_queue_empty(&sk->write_queue)) {
+ if (skb_queue_empty(&sk->sk_write_queue)) {
/*
* setup for corking.
*/
opt = ipc->opt;
if (opt) {
if (inet->cork.opt == NULL)
- inet->cork.opt = kmalloc(sizeof(struct ip_options)+40, sk->allocation);
+ inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
inet->cork.flags |= IPCORK_OPT;
inet->cork.addr = ipc->addr;
@@ -805,7 +805,7 @@ int ip_append_data(struct sock *sk,
* it is not necessary. Not a big bug, but needs a fix.
*/
- if ((skb = skb_peek_tail(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
goto alloc_new_skb;
while (length > 0) {
@@ -842,10 +842,11 @@ alloc_new_skb:
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
- if (atomic_read(&sk->wmem_alloc) <= 2*sk->sndbuf)
+ if (atomic_read(&sk->sk_wmem_alloc) <=
+ 2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
- sk->allocation);
+ sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
}
@@ -883,7 +884,7 @@ alloc_new_skb:
/*
* Put the packet on the pending queue.
*/
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
@@ -922,7 +923,7 @@ alloc_new_skb:
} else if (i < MAX_SKB_FRAGS) {
if (copy > PAGE_SIZE)
copy = PAGE_SIZE;
- page = alloc_pages(sk->allocation, 0);
+ page = alloc_pages(sk->sk_allocation, 0);
if (page == NULL) {
err = -ENOMEM;
goto error;
@@ -933,7 +934,7 @@ alloc_new_skb:
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
skb->truesize += PAGE_SIZE;
- atomic_add(PAGE_SIZE, &sk->wmem_alloc);
+ atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
} else {
err = -EMSGSIZE;
goto error;
@@ -978,7 +979,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
if (flags&MSG_PROBE)
return 0;
- if (skb_queue_empty(&sk->write_queue))
+ if (skb_queue_empty(&sk->sk_write_queue))
return -EINVAL;
rt = inet->cork.rt;
@@ -999,7 +1000,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
return -EMSGSIZE;
}
- if ((skb = skb_peek_tail(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
return -EINVAL;
inet->cork.length += size;
@@ -1012,7 +1013,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
BUG_TRAP(len == 0);
skb = sock_wmalloc(sk, fragheaderlen + hh_len + 15, 1,
- sk->allocation);
+ sk->sk_allocation);
if (unlikely(!skb)) {
err = -ENOBUFS;
goto error;
@@ -1036,7 +1037,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
/*
* Put the packet on the pending queue.
*/
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
@@ -1088,14 +1089,14 @@ int ip_push_pending_frames(struct sock *sk)
__u8 ttl;
int err = 0;
- if ((skb = __skb_dequeue(&sk->write_queue)) == NULL)
+ if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb->nh.raw)
__skb_pull(skb, skb->nh.raw - skb->data);
- while ((tmp_skb = __skb_dequeue(&sk->write_queue)) != NULL) {
+ while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
@@ -1147,12 +1148,12 @@ int ip_push_pending_frames(struct sock *sk)
iph->id = htons(inet->id++);
}
iph->ttl = ttl;
- iph->protocol = sk->protocol;
+ iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
ip_send_check(iph);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst);
/* Netfilter gets the whole, not yet fragmented skb. */
@@ -1186,7 +1187,7 @@ void ip_flush_pending_frames(struct sock *sk)
struct inet_opt *inet = inet_sk(sk);
struct sk_buff *skb;
- while ((skb = __skb_dequeue_tail(&sk->write_queue)) != NULL)
+ while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
inet->cork.flags &= ~IPCORK_OPT;
@@ -1257,7 +1258,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
.uli_u = { .ports =
{ .sport = skb->h.th->dest,
.dport = skb->h.th->source } },
- .proto = sk->protocol };
+ .proto = sk->sk_protocol };
if (ip_route_output_key(&rt, &fl))
return;
}
@@ -1270,11 +1271,11 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
*/
bh_lock_sock(sk);
inet->tos = skb->nh.iph->tos;
- sk->priority = skb->priority;
- sk->protocol = skb->nh.iph->protocol;
+ sk->sk_priority = skb->priority;
+ sk->sk_protocol = skb->nh.iph->protocol;
ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, rt, MSG_DONTWAIT);
- if ((skb = skb_peek(&sk->write_queue)) != NULL) {
+ if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
if (arg->csumoffset >= 0)
*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
skb->ip_summed = CHECKSUM_NONE;
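ip_push_pending_frames turns the corked sk_write_queue into a single datagram: the first skb becomes the head and every later fragment is appended through a tail pointer-to-pointer, standing in for skb_shinfo(head)->frag_list. The same splice, sketched over plain nodes with hypothetical names:

#include <stdio.h>
#include <stddef.h>

struct skb { struct skb *next; int len; };

/* Pop from a queue head; NULL when empty. */
static struct skb *dequeue(struct skb **q)
{
        struct skb *s = *q;

        if (s) {
                *q = s->next;
                s->next = NULL;
        }
        return s;
}

/* Collapse a queue into head + chain, as ip_push_pending_frames does. */
static struct skb *coalesce(struct skb **q, int *total)
{
        struct skb *head = dequeue(q), *frag, **tail;

        if (!head)
                return NULL;
        *total = head->len;
        tail = &head->next;          /* stands in for &frag_list */
        while ((frag = dequeue(q)) != NULL) {
                *tail = frag;
                tail = &frag->next;
                *total += frag->len;
        }
        return head;
}

int main(void)
{
        struct skb c = { NULL, 300 }, b = { &c, 200 }, a = { &b, 100 };
        struct skb *q = &a;
        int total;

        coalesce(&q, &total);
        printf("datagram bytes: %d\n", total);   /* 600 */
        return 0;
}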
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 84400ae50e49..78b600d5d125 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -194,7 +194,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
{
struct ip_ra_chain *ra, *new_ra, **rap;
- if (sk->type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW)
+ if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW)
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
@@ -315,7 +315,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
int copied;
err = -EAGAIN;
- skb = skb_dequeue(&sk->error_queue);
+ skb = skb_dequeue(&sk->sk_error_queue);
if (skb == NULL)
goto out;
@@ -362,15 +362,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
err = copied;
/* Reset and regenerate socket error */
- spin_lock_irq(&sk->error_queue.lock);
- sk->err = 0;
- if ((skb2 = skb_peek(&sk->error_queue)) != NULL) {
- sk->err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_irq(&sk->error_queue.lock);
- sk->error_report(sk);
- } else {
- spin_unlock_irq(&sk->error_queue.lock);
- }
+ spin_lock_irq(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_irq(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
+ } else
+ spin_unlock_irq(&sk->sk_error_queue.lock);
out_free_skb:
kfree_skb(skb);
@@ -431,12 +430,13 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
err = ip_options_get(&opt, optval, optlen, 1);
if (err)
break;
- if (sk->type == SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
struct tcp_opt *tp = tcp_sk(sk);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- if (sk->family == PF_INET ||
- (!((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
- && inet->daddr != LOOPBACK4_IPV6)) {
+ if (sk->sk_family == PF_INET ||
+ (!((1 << sk->sk_state) &
+ (TCPF_LISTEN | TCPF_CLOSE)) &&
+ inet->daddr != LOOPBACK4_IPV6)) {
#endif
if (inet->opt)
tp->ext_header_len -= inet->opt->optlen;
@@ -483,7 +483,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
break;
case IP_TOS: /* This sets both TOS and Precedence */
- if (sk->type == SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
val &= ~3;
val |= inet->tos & 3;
}
@@ -494,7 +494,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
}
if (inet->tos != val) {
inet->tos = val;
- sk->priority = rt_tos2priority(val);
+ sk->sk_priority = rt_tos2priority(val);
sk_dst_reset(sk);
}
break;
@@ -506,7 +506,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
inet->uc_ttl = val;
break;
case IP_HDRINCL:
- if(sk->type!=SOCK_RAW) {
+ if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
@@ -520,10 +520,10 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
case IP_RECVERR:
inet->recverr = !!val;
if (!val)
- skb_queue_purge(&sk->error_queue);
+ skb_queue_purge(&sk->sk_error_queue);
break;
case IP_MULTICAST_TTL:
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (optlen<1)
goto e_inval;
@@ -543,7 +543,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
struct ip_mreqn mreq;
struct net_device *dev = NULL;
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
/*
* Check the arguments are allowable
@@ -581,7 +581,8 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
break;
err = -EINVAL;
- if (sk->bound_dev_if && mreq.imr_ifindex != sk->bound_dev_if)
+ if (sk->sk_bound_dev_if &&
+ mreq.imr_ifindex != sk->sk_bound_dev_if)
break;
inet->mc_index = mreq.imr_ifindex;
@@ -998,7 +999,7 @@ int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *op
release_sock(sk);
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
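The ip_recv_error hunk keeps sk_err consistent with the error queue: after a report is consumed, sk_err is cleared and, if another error is still queued, re-primed from it and re-signalled outside the lock. A sketch with a pthread mutex standing in for the queue spinlock (build with -pthread; types hypothetical):

#include <pthread.h>
#include <stdio.h>

struct err_skb { struct err_skb *next; int ee_errno; };

struct sock {
        pthread_mutex_t q_lock;          /* sk_error_queue.lock analogue */
        struct err_skb *error_queue;
        int sk_err;
        void (*sk_error_report)(struct sock *sk);
};

static void regenerate_error(struct sock *sk)
{
        struct err_skb *next;

        pthread_mutex_lock(&sk->q_lock);
        sk->sk_err = 0;
        next = sk->error_queue;          /* skb_peek analogue */
        if (next) {
                sk->sk_err = next->ee_errno;
                pthread_mutex_unlock(&sk->q_lock);
                sk->sk_error_report(sk); /* wake pollers outside the lock */
        } else {
                pthread_mutex_unlock(&sk->q_lock);
        }
}

static void report(struct sock *sk)
{
        printf("err=%d\n", sk->sk_err);
}

int main(void)
{
        struct err_skb pending = { NULL, 113 /* EHOSTUNREACH */ };
        struct sock sk = { PTHREAD_MUTEX_INITIALIZER, &pending, 0, report };

        regenerate_error(&sk);
        return 0;
}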
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b6c5c1c872c4..f89cff8b3c25 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -860,7 +860,8 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char *optval,int optlen)
switch(optname)
{
case MRT_INIT:
- if(sk->type!=SOCK_RAW || inet_sk(sk)->num!=IPPROTO_IGMP)
+ if (sk->sk_type != SOCK_RAW ||
+ inet_sk(sk)->num != IPPROTO_IGMP)
return -EOPNOTSUPP;
if(optlen!=sizeof(int))
return -ENOPROTOOPT;
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 37ced15b419d..8c8ceb7d7b2a 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -1288,7 +1288,7 @@ getorigdst(struct sock *sk, int optval, void *user, int *len)
IPPROTO_TCP } };
/* We only do TCP at the moment: is there a better way? */
- if (strcmp(sk->prot->name, "TCP") != 0) {
+ if (strcmp(sk->sk_prot->name, "TCP")) {
DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
return -ENOPROTOOPT;
}
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index ca12c5b7c2be..7b3d403d8aae 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -535,14 +535,14 @@ ipq_rcv_sk(struct sock *sk, int len)
if (down_trylock(&ipqnl_sem))
return;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
ipq_rcv_skb(skb);
kfree_skb(skb);
}
up(&ipqnl_sem);
- } while (ipqnl && ipqnl->receive_queue.qlen);
+ } while (ipqnl && ipqnl->sk_receive_queue.qlen);
}
static int
@@ -691,7 +691,7 @@ cleanup_sysctl:
proc_net_remove(IPQ_PROC_FS_NAME);
cleanup_ipqnl:
- sock_release(ipqnl->socket);
+ sock_release(ipqnl->sk_socket);
down(&ipqnl_sem);
up(&ipqnl_sem);
diff --git a/net/ipv4/netfilter/ipchains_core.c b/net/ipv4/netfilter/ipchains_core.c
index 77b02d79f1fa..34c6771761e0 100644
--- a/net/ipv4/netfilter/ipchains_core.c
+++ b/net/ipv4/netfilter/ipchains_core.c
@@ -1836,7 +1836,7 @@ int ipfw_init_or_cleanup(int init)
cleanup_netlink:
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
- sock_release(ipfwsk->socket);
+ sock_release(ipfwsk->sk_socket);
cleanup_nothing:
#endif
diff --git a/net/ipv4/netfilter/ipfwadm_core.c b/net/ipv4/netfilter/ipfwadm_core.c
index 1f6cb59f826e..9497eb2898a2 100644
--- a/net/ipv4/netfilter/ipfwadm_core.c
+++ b/net/ipv4/netfilter/ipfwadm_core.c
@@ -1435,7 +1435,7 @@ int ipfw_init_or_cleanup(int init)
cleanup:
#ifdef CONFIG_IP_FIREWALL_NETLINK
- sock_release(ipfwsk->socket);
+ sock_release(ipfwsk->sk_socket);
#endif
unregister_netdevice_notifier(&ipfw_dev_notifier);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 51023be976ab..30b12db07db3 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -336,7 +336,7 @@ static int __init init(void)
return -ENOMEM;
if (ipt_register_target(&ipt_ulog_reg) != 0) {
- sock_release(nflognl->socket);
+ sock_release(nflognl->sk_socket);
return -EINVAL;
}
@@ -351,7 +351,7 @@ static void __exit fini(void)
DEBUGP("ipt_ULOG: cleanup_module\n");
ipt_unregister_target(&ipt_ulog_reg);
- sock_release(nflognl->socket);
+ sock_release(nflognl->sk_socket);
/* remove pending timers and free allocated skb's */
for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 5cd5efa04d04..73b67609f390 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -28,7 +28,8 @@ match_comm(const struct sk_buff *skb, const char *comm)
if(files) {
spin_lock(&files->file_lock);
for (i=0; i < files->max_fds; i++) {
- if (fcheck_files(files, i) == skb->sk->socket->file) {
+ if (fcheck_files(files, i) ==
+ skb->sk->sk_socket->file) {
spin_unlock(&files->file_lock);
task_unlock(p);
read_unlock(&tasklist_lock);
@@ -59,7 +60,8 @@ match_pid(const struct sk_buff *skb, pid_t pid)
if(files) {
spin_lock(&files->file_lock);
for (i=0; i < files->max_fds; i++) {
- if (fcheck_files(files, i) == skb->sk->socket->file) {
+ if (fcheck_files(files, i) ==
+ skb->sk->sk_socket->file) {
spin_unlock(&files->file_lock);
task_unlock(p);
read_unlock(&tasklist_lock);
@@ -78,7 +80,7 @@ static int
match_sid(const struct sk_buff *skb, pid_t sid)
{
struct task_struct *g, *p;
- struct file *file = skb->sk->socket->file;
+ struct file *file = skb->sk->sk_socket->file;
int i, found=0;
read_lock(&tasklist_lock);
@@ -119,17 +121,17 @@ match(const struct sk_buff *skb,
{
const struct ipt_owner_info *info = matchinfo;
- if (!skb->sk || !skb->sk->socket || !skb->sk->socket->file)
+ if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
return 0;
if(info->match & IPT_OWNER_UID) {
- if((skb->sk->socket->file->f_uid != info->uid) ^
+ if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
!!(info->invert & IPT_OWNER_UID))
return 0;
}
if(info->match & IPT_OWNER_GID) {
- if((skb->sk->socket->file->f_gid != info->gid) ^
+ if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
!!(info->invert & IPT_OWNER_GID))
return 0;
}
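The ipt_owner UID/GID tests use the netfilter XOR-invert idiom: (mismatch) ^ !!(invert & FLAG) is true exactly when the rule fails, whether or not it is negated. Truth-tabled in a few lines (flag value illustrative):

#include <stdio.h>

#define OWNER_UID 0x01

/* Returns nonzero when the packet should NOT match, as the code above. */
static int uid_rule_fails(unsigned owner_uid, unsigned rule_uid,
                          unsigned invert)
{
        return (owner_uid != rule_uid) ^ !!(invert & OWNER_UID);
}

int main(void)
{
        printf("%d\n", uid_rule_fails(1000, 1000, 0));         /* 0: match    */
        printf("%d\n", uid_rule_fails(1000, 0,    0));         /* 1: mismatch */
        printf("%d\n", uid_rule_fails(1000, 1000, OWNER_UID)); /* 1: negated  */
        printf("%d\n", uid_rule_fails(1000, 0,    OWNER_UID)); /* 0: negated  */
        return 0;
}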
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6c1ebbaf52cc..cec64c76d99c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -89,11 +89,11 @@ static void raw_v4_hash(struct sock *sk)
(RAWV4_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v4_lock);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
write_unlock_bh(&raw_v4_lock);
}
@@ -101,12 +101,12 @@ static void raw_v4_hash(struct sock *sk)
static void raw_v4_unhash(struct sock *sk)
{
write_lock_bh(&raw_v4_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&raw_v4_lock);
@@ -118,13 +118,13 @@ struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
{
struct sock *s = sk;
- for (s = sk; s; s = s->next) {
+ for (; s; s = s->sk_next) {
struct inet_opt *inet = inet_sk(s);
if (inet->num == num &&
!(inet->daddr && inet->daddr != raddr) &&
!(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
- !(s->bound_dev_if && s->bound_dev_if != dif))
+ !(s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
break; /* gotcha */
}
return s;
@@ -174,7 +174,7 @@ void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
if (clone)
raw_rcv(sk, clone);
}
- sk = __raw_v4_lookup(sk->next, iph->protocol,
+ sk = __raw_v4_lookup(sk->sk_next, iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
}
@@ -195,7 +195,7 @@ void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
2. Socket is connected (otherwise the error indication
is useless without ip_recverr and the error is hard).
*/
- if (!inet->recverr && sk->state != TCP_ESTABLISHED)
+ if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
switch (type) {
@@ -231,8 +231,8 @@ void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
}
if (inet->recverr || harderr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
}
}
@@ -288,7 +288,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, int length,
goto error;
skb_reserve(skb, hh_len);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst);
skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
@@ -390,14 +390,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
*/
} else {
err = -EINVAL;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->daddr;
}
ipc.addr = inet->saddr;
ipc.opt = NULL;
- ipc.oif = sk->bound_dev_if;
+ ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(msg, &ipc);
@@ -426,7 +426,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
daddr = ipc.opt->faddr;
}
}
- tos = RT_TOS(inet->tos) | sk->localroute;
+ tos = RT_TOS(inet->tos) | sk->sk_localroute;
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
@@ -443,7 +443,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
{ .daddr = daddr,
.saddr = saddr,
.tos = tos } },
- .proto = inet->hdrincl ? IPPROTO_RAW : sk->protocol };
+ .proto = inet->hdrincl ? IPPROTO_RAW :
+ sk->sk_protocol,
+ };
err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
}
if (err)
@@ -506,7 +508,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
int ret = -EINVAL;
int chk_addr_ret;
- if (sk->state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
+ if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
goto out;
chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
ret = -EADDRNOTAVAIL;
@@ -645,18 +647,18 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->len;
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
@@ -700,8 +702,8 @@ static struct sock *raw_get_first(struct seq_file *seq)
for (state->bucket = 0; state->bucket < RAWV4_HTABLE_SIZE; ++state->bucket) {
sk = raw_v4_htable[state->bucket];
- while (sk && sk->family != PF_INET)
- sk = sk->next;
+ while (sk && sk->sk_family != PF_INET)
+ sk = sk->sk_next;
if (sk)
break;
}
@@ -713,10 +715,10 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
struct raw_iter_state* state = raw_seq_private(seq);
do {
- sk = sk->next;
+ sk = sk->sk_next;
try_again:
;
- } while (sk && sk->family != PF_INET);
+ } while (sk && sk->sk_family != PF_INET);
if (!sk && ++state->bucket < RAWV4_HTABLE_SIZE) {
sk = raw_v4_htable[state->bucket];
@@ -768,10 +770,11 @@ static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
- i, src, srcp, dest, destp, sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ i, src, srcp, dest, destp, sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
return tmpbuf;
}
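raw_v4_hash/unhash above are the textbook next/pprev intrusive hash chain: pprev points at whatever pointer currently holds this node (the bucket head or the previous node's sk_next), so unlinking never walks the chain and never special-cases the head. A runnable sketch (locking and refcounting omitted):

#include <stdio.h>
#include <stddef.h>

struct sock {
        struct sock *sk_next;
        struct sock **sk_pprev;
        int id;
};

static void hash_insert(struct sock **bucket, struct sock *sk)
{
        if ((sk->sk_next = *bucket) != NULL)
                (*bucket)->sk_pprev = &sk->sk_next;
        *bucket = sk;
        sk->sk_pprev = bucket;
}

static void hash_remove(struct sock *sk)
{
        if (sk->sk_pprev) {
                if (sk->sk_next)
                        sk->sk_next->sk_pprev = sk->sk_pprev;
                *sk->sk_pprev = sk->sk_next;
                sk->sk_pprev = NULL;     /* marks "not hashed" */
        }
}

int main(void)
{
        struct sock a = { .id = 1 }, b = { .id = 2 }, *bucket = NULL;

        hash_insert(&bucket, &a);
        hash_insert(&bucket, &b);   /* bucket: b -> a */
        hash_remove(&b);
        printf("%d\n", bucket->id); /* 1 */
        return 0;
}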
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 20b54ef6281d..2e6ab52bff3b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -103,7 +103,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
if (child) {
- sk_set_owner(child, sk->owner);
+ sk_set_owner(child, sk->sk_owner);
tcp_acceptq_queue(sk, req, child);
} else
tcp_openreq_free(req);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c09776935947..21489fafd41e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -294,7 +294,7 @@ int tcp_mem_schedule(struct sock *sk, int size, int kind)
{
int amt = TCP_PAGES(size);
- sk->forward_alloc += amt * TCP_MEM_QUANTUM;
+ sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM;
atomic_add(amt, &tcp_memory_allocated);
/* Under limit. */
@@ -315,18 +315,16 @@ int tcp_mem_schedule(struct sock *sk, int size, int kind)
tcp_enter_memory_pressure();
if (kind) {
- if (atomic_read(&sk->rmem_alloc) < sysctl_tcp_rmem[0])
+ if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0])
return 1;
- } else {
- if (sk->wmem_queued < sysctl_tcp_wmem[0])
- return 1;
- }
+ } else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0])
+ return 1;
if (!tcp_memory_pressure ||
sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) *
- TCP_PAGES(sk->wmem_queued +
- atomic_read(&sk->rmem_alloc) +
- sk->forward_alloc))
+ TCP_PAGES(sk->sk_wmem_queued +
+ atomic_read(&sk->sk_rmem_alloc) +
+ sk->sk_forward_alloc))
return 1;
suppress_allocation:
@@ -337,22 +335,22 @@ suppress_allocation:
/* Fail only if socket is _under_ its sndbuf.
* In this case we cannot block, so we have to fail.
*/
- if (sk->wmem_queued + size >= sk->sndbuf)
+ if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
return 1;
}
/* Alas. Undo changes. */
- sk->forward_alloc -= amt * TCP_MEM_QUANTUM;
+ sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM;
atomic_sub(amt, &tcp_memory_allocated);
return 0;
}
void __tcp_mem_reclaim(struct sock *sk)
{
- if (sk->forward_alloc >= TCP_MEM_QUANTUM) {
- atomic_sub(sk->forward_alloc / TCP_MEM_QUANTUM,
+ if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) {
+ atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM,
&tcp_memory_allocated);
- sk->forward_alloc &= TCP_MEM_QUANTUM - 1;
+ sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1;
if (tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
tcp_memory_pressure = 0;
@@ -363,8 +361,8 @@ void tcp_rfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
- atomic_sub(skb->truesize, &sk->rmem_alloc);
- sk->forward_alloc += skb->truesize;
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ sk->sk_forward_alloc += skb->truesize;
}
/*
@@ -389,8 +387,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sock *sk = sock->sk;
struct tcp_opt *tp = tcp_sk(sk);
- poll_wait(file, sk->sleep, wait);
- if (sk->state == TCP_LISTEN)
+ poll_wait(file, sk->sk_sleep, wait);
+ if (sk->sk_state == TCP_LISTEN)
return tcp_listen_poll(sk, wait);
/* Socket is not locked. We are protected from async events
@@ -399,7 +397,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
mask = 0;
- if (sk->err)
+ if (sk->sk_err)
mask = POLLERR;
/*
@@ -429,15 +427,15 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* NOTE. Check for TCP_CLOSE is added. The goal is to prevent
* blocking on fresh not-connected or disconnected socket. --ANK
*/
- if (sk->shutdown == SHUTDOWN_MASK || sk->state == TCP_CLOSE)
+ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM;
/* Connected? */
- if ((1 << sk->state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
/* Potential race condition. If read of tp below will
- * escape above sk->state, we can be illegally awaken
+ * escape above sk->sk_state, we can be illegally awakened
* in SYN_* states. */
if ((tp->rcv_nxt != tp->copied_seq) &&
(tp->urg_seq != tp->copied_seq ||
@@ -445,12 +443,13 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
mask |= POLLIN | POLLRDNORM;
- if (!(sk->shutdown & SEND_SHUTDOWN)) {
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE,
+ &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
* wspace test but before the flags are set,
@@ -472,15 +471,15 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
void tcp_write_space(struct sock *sk)
{
- struct socket *sock = sk->socket;
+ struct socket *sock = sk->sk_socket;
if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
- if (sock->fasync_list && !(sk->shutdown & SEND_SHUTDOWN))
+ if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
}
}
@@ -492,11 +491,11 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
switch (cmd) {
case SIOCINQ:
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
lock_sock(sk);
- if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else if (sock_flag(sk, SOCK_URGINLINE) ||
!tp->urg_data ||
@@ -505,9 +504,9 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
answ = tp->rcv_nxt - tp->copied_seq;
/* Subtract 1, if FIN is in queue. */
- if (answ && !skb_queue_empty(&sk->receive_queue))
+ if (answ && !skb_queue_empty(&sk->sk_receive_queue))
answ -=
- ((struct sk_buff*)sk->receive_queue.prev)->h.th->fin;
+ ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
} else
answ = tp->urg_seq - tp->copied_seq;
release_sock(sk);
@@ -516,10 +515,10 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
break;
case SIOCOUTQ:
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
- if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
answ = tp->write_seq - tp->snd_una;
@@ -538,8 +537,8 @@ int tcp_listen_start(struct sock *sk)
struct tcp_opt *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt;
- sk->max_ack_backlog = 0;
- sk->ack_backlog = 0;
+ sk->sk_max_ack_backlog = 0;
+ sk->sk_ack_backlog = 0;
tp->accept_queue = tp->accept_queue_tail = NULL;
tp->syn_wait_lock = RW_LOCK_UNLOCKED;
tcp_delack_init(tp);
@@ -563,17 +562,17 @@ int tcp_listen_start(struct sock *sk)
* It is OK, because this socket enters to hash table only
* after validation is complete.
*/
- sk->state = TCP_LISTEN;
- if (!sk->prot->get_port(sk, inet->num)) {
+ sk->sk_state = TCP_LISTEN;
+ if (!sk->sk_prot->get_port(sk, inet->num)) {
inet->sport = htons(inet->num);
sk_dst_reset(sk);
- sk->prot->hash(sk);
+ sk->sk_prot->hash(sk);
return 0;
}
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
write_lock_bh(&tp->syn_wait_lock);
tp->listen_opt = NULL;
write_unlock_bh(&tp->syn_wait_lock);
@@ -649,7 +648,7 @@ static void tcp_listen_stop (struct sock *sk)
tcp_acceptq_removed(sk);
tcp_openreq_fastfree(req);
}
- BUG_TRAP(!sk->ack_backlog);
+ BUG_TRAP(!sk->sk_ack_backlog);
}
/*
@@ -663,24 +662,24 @@ static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
struct task_struct *tsk = current;
DEFINE_WAIT(wait);
- while ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
- if (sk->err)
+ while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+ if (sk->sk_err)
return sock_error(sk);
- if ((1 << sk->state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
+ if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
return -EPIPE;
if (!*timeo_p)
return -EAGAIN;
if (signal_pending(tsk))
return sock_intr_errno(*timeo_p);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
tp->write_pending++;
release_sock(sk);
*timeo_p = schedule_timeout(*timeo_p);
lock_sock(sk);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
tp->write_pending--;
}
return 0;
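wait_for_tcp_connect above is the standard sleep-until-state-change loop: register on the wait queue, drop the socket lock, sleep with a timeout, retake the lock, and re-test state, errors, signals, and the timeout each iteration. A condition-variable rendering of the same loop under hypothetical names (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <errno.h>
#include <time.h>

struct conn {
        pthread_mutex_t lock;    /* plays the socket lock */
        pthread_cond_t  change;  /* plays sk_sleep */
        bool established;
        int  err;
};

/* Returns 0 once connected, -err on failure, -EAGAIN on timeout. */
static int wait_for_connect(struct conn *c, const struct timespec *deadline)
{
        int rc = 0;

        pthread_mutex_lock(&c->lock);
        while (!c->established) {
                if (c->err) {            /* like the sk->sk_err test */
                        rc = -c->err;
                        break;
                }
                /* The wait releases the lock while asleep, exactly as
                 * release_sock() + schedule_timeout() + lock_sock() does. */
                if (pthread_cond_timedwait(&c->change, &c->lock,
                                           deadline) == ETIMEDOUT) {
                        rc = -EAGAIN;
                        break;
                }
        }
        pthread_mutex_unlock(&c->lock);
        return rc;
}

int main(void)
{
        struct conn c = { PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, true, 0 };
        struct timespec now;

        clock_gettime(CLOCK_REALTIME, &now);
        return wait_for_connect(&c, &now);   /* already established: 0 */
}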
@@ -688,7 +687,7 @@ static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
static inline int tcp_memory_free(struct sock *sk)
{
- return sk->wmem_queued < sk->sndbuf;
+ return sk->sk_wmem_queued < sk->sk_sndbuf;
}
/*
@@ -706,21 +705,21 @@ static int wait_for_tcp_memory(struct sock *sk, long *timeo)
current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
for (;;) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
if (!*timeo)
goto do_nonblock;
if (signal_pending(current))
goto do_interrupted;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
if (tcp_memory_free(sk) && !vm_wait)
break;
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
tp->write_pending++;
release_sock(sk);
if (!tcp_memory_free(sk) || vm_wait)
@@ -739,7 +738,7 @@ static int wait_for_tcp_memory(struct sock *sk, long *timeo)
*timeo = current_timeo;
}
out:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return err;
do_error:
@@ -796,7 +795,7 @@ static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
TCP_SKB_CB(skb)->end_seq = tp->write_seq;
TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
TCP_SKB_CB(skb)->sacked = 0;
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
tcp_charge_skb(sk, skb);
if (!tp->send_head)
tp->send_head = skb;
@@ -816,7 +815,7 @@ static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
int mss_now, int nonagle)
{
if (tp->send_head) {
- struct sk_buff *skb = sk->write_queue.prev;
+ struct sk_buff *skb = sk->sk_write_queue.prev;
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
tcp_mark_urg(tp, flags, skb);
@@ -844,21 +843,21 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
- if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
goto out_err;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
copied = 0;
err = -EPIPE;
- if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
while (psize > 0) {
- struct sk_buff *skb = sk->write_queue.prev;
+ struct sk_buff *skb = sk->sk_write_queue.prev;
struct page *page = pages[poffset / PAGE_SIZE];
int copy, i;
int offset = poffset % PAGE_SIZE;
@@ -870,7 +869,7 @@ new_segment:
goto wait_for_sndbuf;
skb = tcp_alloc_pskb(sk, 0, tp->mss_cache,
- sk->allocation);
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
@@ -917,7 +916,7 @@ new_segment:
continue;
wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, 1);
@@ -948,8 +947,8 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
- if (!(sk->route_caps & NETIF_F_SG) ||
- !(sk->route_caps & TCP_ZC_CSUM_FLAGS))
+ if (!(sk->sk_route_caps & NETIF_F_SG) ||
+ !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
return sock_no_sendpage(sock, page, offset, size, flags);
#undef TCP_ZC_CSUM_FLAGS
@@ -985,8 +984,8 @@ static inline int tcp_copy_to_page(struct sock *sk, char *from,
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
- sk->wmem_queued += copy;
- sk->forward_alloc -= copy;
+ sk->sk_wmem_queued += copy;
+ sk->sk_forward_alloc -= copy;
return 0;
}
@@ -1016,7 +1015,7 @@ static inline int select_size(struct sock *sk, struct tcp_opt *tp)
{
int tmp = tp->mss_cache_std;
- if (sk->route_caps & NETIF_F_SG) {
+ if (sk->sk_route_caps & NETIF_F_SG) {
int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
if (tmp >= pgbreak &&
@@ -1044,12 +1043,12 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
- if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
goto out_err;
/* This should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
@@ -1059,7 +1058,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
copied = 0;
err = -EPIPE;
- if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
while (--iovlen >= 0) {
@@ -1071,7 +1070,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
while (seglen > 0) {
int copy;
- skb = sk->write_queue.prev;
+ skb = sk->sk_write_queue.prev;
if (!tp->send_head ||
(copy = mss_now - skb->len) <= 0) {
@@ -1084,14 +1083,16 @@ new_segment:
goto wait_for_sndbuf;
skb = tcp_alloc_pskb(sk, select_size(sk, tp),
- 0, sk->allocation);
+ 0, sk->sk_allocation);
if (!skb)
goto wait_for_memory;
/*
* Check whether we can use HW checksum.
*/
- if (sk->route_caps & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM))
+ if (sk->sk_route_caps &
+ (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
+ NETIF_F_HW_CSUM))
skb->ip_summed = CHECKSUM_HW;
skb_entail(sk, tp, skb);
@@ -1122,7 +1123,7 @@ new_segment:
merge = 1;
} else if (i == MAX_SKB_FRAGS ||
(!i &&
- !(sk->route_caps & NETIF_F_SG))) {
+ !(sk->sk_route_caps & NETIF_F_SG))) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
@@ -1205,7 +1206,7 @@ new_segment:
continue;
wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, 1);
@@ -1258,7 +1259,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo,
tp->urg_data == TCP_URG_READ)
return -EINVAL; /* Yes this is right ! */
- if (sk->state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
+ if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
return -ENOTCONN;
if (tp->urg_data & TCP_URG_VALID) {
@@ -1281,7 +1282,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo,
return err ? -EFAULT : len;
}
- if (sk->state == TCP_CLOSE || (sk->shutdown & RCV_SHUTDOWN))
+ if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
return 0;
/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
@@ -1301,7 +1302,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo,
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
- __skb_unlink(skb, &sk->receive_queue);
+ __skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
@@ -1317,7 +1318,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
int time_to_ack = 0;
#if TCP_DEBUG
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif
@@ -1335,7 +1336,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
* in queue.
*/
(copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
- !tp->ack.pingpong && !atomic_read(&sk->rmem_alloc)))
+ !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
time_to_ack = 1;
}
@@ -1345,7 +1346,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
* Even if window raised up to infinity, do not send window open ACK
* in states, where we will not receive more. It is useless.
*/
- if (copied > 0 && !time_to_ack && !(sk->shutdown & RCV_SHUTDOWN)) {
+ if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
__u32 rcv_window_now = tcp_receive_window(tp);
/* Optimize, __tcp_select_window() is not cheap. */
@@ -1365,7 +1366,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
tcp_send_ack(sk);
}
-/* Now socket state including sk->err is changed only under lock,
+/* Now socket state including sk->sk_err is changed only under lock,
* hence we may omit checks after joining wait queue.
* We check receive queue before schedule() only as optimization;
* it is very likely that release_sock() added new data.
@@ -1375,18 +1376,18 @@ static long tcp_data_wait(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
release_sock(sk);
- if (skb_queue_empty(&sk->receive_queue))
+ if (skb_queue_empty(&sk->sk_receive_queue))
timeo = schedule_timeout(timeo);
lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return timeo;
}
@@ -1401,7 +1402,7 @@ static void tcp_prequeue_process(struct sock *sk)
* necessary */
local_bh_disable();
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
- sk->backlog_rcv(sk, skb);
+ sk->sk_backlog_rcv(sk, skb);
local_bh_enable();
/* Clear memory counter. */
@@ -1413,7 +1414,7 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
struct sk_buff *skb;
u32 offset;
- skb_queue_walk(&sk->receive_queue, skb) {
+ skb_queue_walk(&sk->sk_receive_queue, skb) {
offset = seq - TCP_SKB_CB(skb)->seq;
if (skb->h.th->syn)
offset--;
@@ -1445,7 +1446,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
u32 offset;
int copied = 0;
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return -ENOTCONN;
while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
if (offset < skb->len) {
@@ -1511,7 +1512,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
TCP_CHECK_TIMER(sk);
err = -ENOTCONN;
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
goto out;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1549,7 +1550,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* Next get a buffer. */
- skb = skb_peek(&sk->receive_queue);
+ skb = skb_peek(&sk->sk_receive_queue);
do {
if (!skb)
break;
@@ -1571,17 +1572,17 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto found_fin_ok;
BUG_TRAP(flags & MSG_PEEK);
skb = skb->next;
- } while (skb != (struct sk_buff *)&sk->receive_queue);
+ } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->backlog.tail)
+ if (copied >= target && !sk->sk_backlog.tail)
break;
if (copied) {
- if (sk->err ||
- sk->state == TCP_CLOSE ||
- (sk->shutdown & RCV_SHUTDOWN) ||
+ if (sk->sk_err ||
+ sk->sk_state == TCP_CLOSE ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
!timeo ||
(flags & MSG_PEEK))
break;
@@ -1589,15 +1590,15 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (sock_flag(sk, SOCK_DONE))
break;
- if (sk->err) {
+ if (sk->sk_err) {
copied = sock_error(sk);
break;
}
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
- if (sk->state == TCP_CLOSE) {
+ if (sk->sk_state == TCP_CLOSE) {
if (!sock_flag(sk, SOCK_DONE)) {
/* This occurs when user tries to read
* from never connected socket.
@@ -1825,7 +1826,7 @@ static unsigned char new_state[16] = {
static int tcp_close_state(struct sock *sk)
{
- int next = (int)new_state[sk->state];
+ int next = (int)new_state[sk->sk_state];
int ns = next & TCP_STATE_MASK;
tcp_set_state(sk, ns);
@@ -1848,7 +1849,7 @@ void tcp_shutdown(struct sock *sk, int how)
return;
/* If we've already sent a FIN, or it's a closed state, skip this. */
- if ((1 << sk->state) &
+ if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_SYN_SENT |
TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
/* Clear out any half completed packets. FIN if needed. */
@@ -1864,26 +1865,26 @@ void tcp_shutdown(struct sock *sk, int how)
static inline int closing(struct sock *sk)
{
- return (1 << sk->state) &
+ return (1 << sk->sk_state) &
(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
}
static __inline__ void tcp_kill_sk_queues(struct sock *sk)
{
/* First the read buffer. */
- __skb_queue_purge(&sk->receive_queue);
+ __skb_queue_purge(&sk->sk_receive_queue);
/* Next, the error queue. */
- __skb_queue_purge(&sk->error_queue);
+ __skb_queue_purge(&sk->sk_error_queue);
/* Next, the write queue. */
- BUG_TRAP(skb_queue_empty(&sk->write_queue));
+ BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
/* Account for returned memory. */
tcp_mem_reclaim(sk);
- BUG_TRAP(!sk->wmem_queued);
- BUG_TRAP(!sk->forward_alloc);
+ BUG_TRAP(!sk->sk_wmem_queued);
+ BUG_TRAP(!sk->sk_forward_alloc);
/* It is _impossible_ for the backlog to contain anything
* when we get here. All user references to this socket
@@ -1899,33 +1900,33 @@ static __inline__ void tcp_kill_sk_queues(struct sock *sk)
*/
void tcp_destroy_sock(struct sock *sk)
{
- BUG_TRAP(sk->state == TCP_CLOSE);
+ BUG_TRAP(sk->sk_state == TCP_CLOSE);
BUG_TRAP(sock_flag(sk, SOCK_DEAD));
/* It cannot be in hash table! */
- BUG_TRAP(!sk->pprev);
+ BUG_TRAP(!sk->sk_pprev);
/* If it has not 0 inet_sk(sk)->num, it must be bound */
- BUG_TRAP(!inet_sk(sk)->num || sk->prev);
+ BUG_TRAP(!inet_sk(sk)->num || sk->sk_prev);
#ifdef TCP_DEBUG
- if (sk->zapped) {
+ if (sk->sk_zapped) {
printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk);
sock_hold(sk);
}
- sk->zapped = 1;
+ sk->sk_zapped = 1;
#endif
- sk->prot->destroy(sk);
+ sk->sk_prot->destroy(sk);
tcp_kill_sk_queues(sk);
xfrm_sk_free_policy(sk);
#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&sk->refcnt) != 1) {
+ if (atomic_read(&sk->sk_refcnt) != 1) {
printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
- sk, atomic_read(&sk->refcnt));
+ sk, atomic_read(&sk->sk_refcnt));
}
#endif
@@ -1939,9 +1940,9 @@ void tcp_close(struct sock *sk, long timeout)
int data_was_unread = 0;
lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
tcp_set_state(sk, TCP_CLOSE);
/* Special case. */
@@ -1954,7 +1955,7 @@ void tcp_close(struct sock *sk, long timeout)
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
- while ((skb = __skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
skb->h.th->fin;
data_was_unread += len;
@@ -1977,9 +1978,9 @@ void tcp_close(struct sock *sk, long timeout)
NET_INC_STATS_USER(TCPAbortOnClose);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_KERNEL);
- } else if (sock_flag(sk, SOCK_LINGER) && !sk->lingertime) {
+ } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
- sk->prot->disconnect(sk, 0);
+ sk->sk_prot->disconnect(sk, 0);
NET_INC_STATS_USER(TCPAbortOnData);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
@@ -2015,7 +2016,8 @@ void tcp_close(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
if (!closing(sk))
break;
release_sock(sk);
@@ -2023,7 +2025,7 @@ void tcp_close(struct sock *sk, long timeout)
lock_sock(sk);
} while (!signal_pending(tsk) && timeout);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
}
adjudge_to_death:
@@ -2055,7 +2057,7 @@ adjudge_to_death:
* linger2 option. --ANK
*/
- if (sk->state == TCP_FIN_WAIT2) {
+ if (sk->sk_state == TCP_FIN_WAIT2) {
struct tcp_opt *tp = tcp_sk(sk);
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
@@ -2073,10 +2075,10 @@ adjudge_to_death:
}
}
}
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
tcp_mem_reclaim(sk);
if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
- (sk->wmem_queued > SOCK_MIN_SNDBUF &&
+ (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
@@ -2088,7 +2090,7 @@ adjudge_to_death:
}
atomic_inc(&tcp_orphan_count);
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
tcp_destroy_sock(sk);
/* Otherwise, socket is reprieved until protocol close. */
@@ -2111,10 +2113,9 @@ int tcp_disconnect(struct sock *sk, int flags)
{
struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
- int old_state;
int err = 0;
+ int old_state = sk->sk_state;
- old_state = sk->state;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
@@ -2128,21 +2129,21 @@ int tcp_disconnect(struct sock *sk, int flags)
* states
*/
tcp_send_active_reset(sk, gfp_any());
- sk->err = ECONNRESET;
+ sk->sk_err = ECONNRESET;
} else if (old_state == TCP_SYN_SENT)
- sk->err = ECONNRESET;
+ sk->sk_err = ECONNRESET;
tcp_clear_xmit_timers(sk);
- __skb_queue_purge(&sk->receive_queue);
+ __skb_queue_purge(&sk->sk_receive_queue);
tcp_writequeue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
inet->dport = 0;
- if (!(sk->userlocks & SOCK_BINDADDR_LOCK))
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
- sk->shutdown = 0;
+ sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt = 0;
if ((tp->write_seq += tp->max_window + 2) == 0)
@@ -2161,9 +2162,9 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_sack_reset(tp);
__sk_dst_reset(sk);
- BUG_TRAP(!inet->num || sk->prev);
+ BUG_TRAP(!inet->num || sk->sk_prev);
- sk->error_report(sk);
+ sk->sk_error_report(sk);
return err;
}
@@ -2192,7 +2193,8 @@ static int wait_for_connect(struct sock *sk, long timeo)
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
release_sock(sk);
if (!tp->accept_queue)
timeo = schedule_timeout(timeo);
@@ -2201,7 +2203,7 @@ static int wait_for_connect(struct sock *sk, long timeo)
if (tp->accept_queue)
break;
err = -EINVAL;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
@@ -2210,7 +2212,7 @@ static int wait_for_connect(struct sock *sk, long timeo)
if (!timeo)
break;
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return err;
}
@@ -2231,7 +2233,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
* and that it has something pending.
*/
error = -EINVAL;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
goto out;
/* Find already established connection */
@@ -2255,7 +2257,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
newsk = req->sk;
tcp_acceptq_removed(sk);
tcp_openreq_fastfree(req);
- BUG_TRAP(newsk->state != TCP_SYN_RECV);
+ BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
release_sock(sk);
return newsk;
@@ -2343,7 +2345,8 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
else {
tp->keepalive_time = val * HZ;
if (sock_flag(sk, SOCK_KEEPOPEN) &&
- !((1 << sk->state) & (TCPF_CLOSE | TCPF_LISTEN))) {
+ !((1 << sk->sk_state) &
+ (TCPF_CLOSE | TCPF_LISTEN))) {
__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
if (tp->keepalive_time > elapsed)
elapsed = tp->keepalive_time - elapsed;
@@ -2396,7 +2399,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
case TCP_WINDOW_CLAMP:
if (!val) {
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
err = -EINVAL;
break;
}
@@ -2411,7 +2414,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
tp->ack.pingpong = 1;
} else {
tp->ack.pingpong = 0;
- if ((1 << sk->state) &
+ if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
tcp_ack_scheduled(tp)) {
tp->ack.pending |= TCP_ACK_PUSHED;
@@ -2451,7 +2454,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval,
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache_std;
- if (!val && ((1 << sk->state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
val = tp->user_mss;
break;
case TCP_NODELAY:
@@ -2490,7 +2493,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval,
if (get_user(len, optlen))
return -EFAULT;
- info.tcpi_state = sk->state;
+ info.tcpi_state = sk->sk_state;
info.tcpi_ca_state = tp->ca_state;
info.tcpi_retransmits = tp->retransmits;
info.tcpi_probes = tp->probes_out;
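
The tcp.c hunks above apply one mechanical transformation: every direct field access on struct sock gains an sk_ prefix (state becomes sk_state, err becomes sk_err, write_queue becomes sk_write_queue, and so on), while control flow is untouched. A minimal sketch of the before/after access pattern, using hypothetical stand-in structs rather than the real kernel definitions:

    /* Illustrative stand-ins only -- not the kernel's struct sock.
     * The patch renames members; every call site keeps its shape. */
    struct sock_before { unsigned char state;    int err;    int sndbuf;    };
    struct sock_after  { unsigned char sk_state; int sk_err; int sk_sndbuf; };

    /* Same logic, old names vs. prefixed names: */
    static int had_error_before(struct sock_before *sk) { return sk->err    != 0; }
    static int had_error_after (struct sock_after  *sk) { return sk->sk_err != 0; }

The prefix makes grep-ability the point: an unqualified "state" or "err" matches half the tree, whereas sk_state and sk_err identify socket fields unambiguously.
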
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index e61b52e1bed9..07eb7b387309 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -54,32 +54,32 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
nlh = NLMSG_PUT(skb, pid, seq, TCPDIAG_GETSOCK, sizeof(*r));
r = NLMSG_DATA(nlh);
- if (sk->state != TCP_TIME_WAIT) {
+ if (sk->sk_state != TCP_TIME_WAIT) {
if (ext & (1<<(TCPDIAG_MEMINFO-1)))
minfo = TCPDIAG_PUT(skb, TCPDIAG_MEMINFO, sizeof(*minfo));
if (ext & (1<<(TCPDIAG_INFO-1)))
info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info));
}
- r->tcpdiag_family = sk->family;
- r->tcpdiag_state = sk->state;
+ r->tcpdiag_family = sk->sk_family;
+ r->tcpdiag_state = sk->sk_state;
r->tcpdiag_timer = 0;
r->tcpdiag_retrans = 0;
- r->id.tcpdiag_if = sk->bound_dev_if;
+ r->id.tcpdiag_if = sk->sk_bound_dev_if;
r->id.tcpdiag_cookie[0] = (u32)(unsigned long)sk;
r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
if (r->tcpdiag_state == TCP_TIME_WAIT) {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket*)sk;
- long tmo = tw->ttd - jiffies;
+ long tmo = tw->tw_ttd - jiffies;
if (tmo < 0)
tmo = 0;
- r->id.tcpdiag_sport = tw->sport;
- r->id.tcpdiag_dport = tw->dport;
- r->id.tcpdiag_src[0] = tw->rcv_saddr;
- r->id.tcpdiag_dst[0] = tw->daddr;
- r->tcpdiag_state = tw->substate;
+ r->id.tcpdiag_sport = tw->tw_sport;
+ r->id.tcpdiag_dport = tw->tw_dport;
+ r->id.tcpdiag_src[0] = tw->tw_rcv_saddr;
+ r->id.tcpdiag_dst[0] = tw->tw_daddr;
+ r->tcpdiag_state = tw->tw_substate;
r->tcpdiag_timer = 3;
r->tcpdiag_expires = (tmo*1000+HZ-1)/HZ;
r->tcpdiag_rqueue = 0;
@@ -89,9 +89,9 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
#ifdef CONFIG_IPV6
if (r->tcpdiag_family == AF_INET6) {
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
- &tw->v6_rcv_saddr);
+ &tw->tw_v6_rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
- &tw->v6_daddr);
+ &tw->tw_v6_daddr);
}
#endif
nlh->nlmsg_len = skb->tail - b;
@@ -124,10 +124,10 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
r->tcpdiag_timer = 4;
r->tcpdiag_retrans = tp->probes_out;
r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
- } else if (timer_pending(&sk->timer)) {
+ } else if (timer_pending(&sk->sk_timer)) {
r->tcpdiag_timer = 2;
r->tcpdiag_retrans = tp->probes_out;
- r->tcpdiag_expires = EXPIRES_IN_MS(sk->timer.expires);
+ r->tcpdiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
} else {
r->tcpdiag_timer = 0;
r->tcpdiag_expires = 0;
@@ -140,16 +140,16 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
r->tcpdiag_inode = sock_i_ino(sk);
if (minfo) {
- minfo->tcpdiag_rmem = atomic_read(&sk->rmem_alloc);
- minfo->tcpdiag_wmem = sk->wmem_queued;
- minfo->tcpdiag_fmem = sk->forward_alloc;
- minfo->tcpdiag_tmem = atomic_read(&sk->wmem_alloc);
+ minfo->tcpdiag_rmem = atomic_read(&sk->sk_rmem_alloc);
+ minfo->tcpdiag_wmem = sk->sk_wmem_queued;
+ minfo->tcpdiag_fmem = sk->sk_forward_alloc;
+ minfo->tcpdiag_tmem = atomic_read(&sk->sk_wmem_alloc);
}
if (info) {
u32 now = tcp_time_stamp;
- info->tcpi_state = sk->state;
+ info->tcpi_state = sk->sk_state;
info->tcpi_ca_state = tp->ca_state;
info->tcpi_retransmits = tp->retransmits;
info->tcpi_probes = tp->probes_out;
@@ -264,7 +264,7 @@ static int tcpdiag_get_exact(struct sk_buff *in_skb, struct nlmsghdr *nlh)
out:
if (sk) {
- if (sk->state == TCP_TIME_WAIT)
+ if (sk->sk_state == TCP_TIME_WAIT)
tcp_tw_put((struct tcp_tw_bucket*)sk);
else
sock_put(sk);
@@ -325,7 +325,7 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
yes = ntohs(inet->dport) <= op[1].no;
break;
case TCPDIAG_BC_AUTO:
- yes = !(sk->userlocks&SOCK_BINDPORT_LOCK);
+ yes = !(sk->sk_userlocks & SOCK_BINDPORT_LOCK);
break;
case TCPDIAG_BC_S_COND:
case TCPDIAG_BC_D_COND:
@@ -344,7 +344,7 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
break;
#ifdef CONFIG_IPV6
- if (sk->family == AF_INET6) {
+ if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
if (op->code == TCPDIAG_BC_S_COND)
@@ -362,7 +362,8 @@ int tcpdiag_bc_run(char *bc, int len, struct sock *sk)
if (bitstring_match(addr, cond->addr, cond->prefix_len))
break;
- if (sk->family == AF_INET6 && cond->family == AF_INET) {
+ if (sk->sk_family == AF_INET6 &&
+ cond->family == AF_INET) {
if (addr[0] == 0 && addr[1] == 0 &&
addr[2] == htonl(0xffff) &&
bitstring_match(addr+3, cond->addr, cond->prefix_len))
@@ -466,7 +467,7 @@ int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
for (sk = tcp_listening_hash[i], num = 0;
sk != NULL;
- sk = sk->next, num++) {
+ sk = sk->sk_next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
@@ -506,12 +507,12 @@ skip_listen_ht:
for (sk = head->chain, num = 0;
sk != NULL;
- sk = sk->next, num++) {
+ sk = sk->sk_next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
- if (!(r->tcpdiag_states&(1<<sk->state)))
+ if (!(r->tcpdiag_states & (1 << sk->sk_state)))
continue;
if (r->id.tcpdiag_sport != inet->sport &&
r->id.tcpdiag_sport)
@@ -531,12 +532,12 @@ skip_listen_ht:
if (r->tcpdiag_states&TCPF_TIME_WAIT) {
for (sk = tcp_ehash[i+tcp_ehash_size].chain;
sk != NULL;
- sk = sk->next, num++) {
+ sk = sk->sk_next, num++) {
struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
- if (!(r->tcpdiag_states&(1<<sk->zapped)))
+ if (!(r->tcpdiag_states & (1 << sk->sk_zapped)))
continue;
if (r->id.tcpdiag_sport != inet->sport &&
r->id.tcpdiag_sport)
@@ -622,7 +623,7 @@ static void tcpdiag_rcv(struct sock *sk, int len)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
tcpdiag_rcv_skb(skb);
kfree_skb(skb);
}
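
The tcp_diag.c hunks also rename the TIME_WAIT fields (ttd becomes tw_ttd, sport becomes tw_sport, and so on) on struct tcp_tw_bucket. For orientation, a hedged sketch of the overlay idiom those hunks rely on: a TIME_WAIT socket is a smaller bucket whose leading members mirror struct sock, so code first checks the state and then casts, as tcpdiag_fill() does above. The layouts below are hypothetical miniatures, not the kernel structs:

    /* Hypothetical mini-layouts sharing a common header, so the cast
     * (struct tcp_tw_bucket *)sk in tcpdiag_fill() has a model here. */
    struct mini_sock { unsigned char sk_state; unsigned short sk_family; };
    struct mini_tw   { unsigned char sk_state; unsigned short sk_family; /* shared */
                       unsigned short tw_sport, tw_dport; };

    static unsigned short tw_source_port(struct mini_sock *sk)
    {
        /* Only valid once sk_state == TCP_TIME_WAIT has been checked,
         * mirroring the r->tcpdiag_state test in the hunk above. */
        return ((struct mini_tw *)sk)->tw_sport;
    }
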
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 82afc9e4c9b0..39e02e34f233 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -184,7 +184,7 @@ static __inline__ int tcp_in_quickack_mode(struct tcp_opt *tp)
/* Buffer size and advertised window tuning.
*
- * 1. Tuning sk->sndbuf, when connection enters established state.
+ * 1. Tuning sk->sk_sndbuf, when connection enters established state.
*/
static void tcp_fixup_sndbuf(struct sock *sk)
@@ -192,8 +192,8 @@ static void tcp_fixup_sndbuf(struct sock *sk)
int sndmem = tcp_sk(sk)->mss_clamp + MAX_TCP_HEADER + 16 +
sizeof(struct sk_buff);
- if (sk->sndbuf < 3*sndmem)
- sk->sndbuf = min(3*sndmem, sysctl_tcp_wmem[2]);
+ if (sk->sk_sndbuf < 3 * sndmem)
+ sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -276,8 +276,8 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
*/
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
- if (sk->rcvbuf < 4*rcvmem)
- sk->rcvbuf = min(4*rcvmem, sysctl_tcp_rmem[2]);
+ if (sk->sk_rcvbuf < 4 * rcvmem)
+ sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
}
/* 4. Try to fixup all. It is made immediately after connection enters
@@ -288,9 +288,9 @@ static void tcp_init_buffer_space(struct sock *sk)
struct tcp_opt *tp = tcp_sk(sk);
int maxwin;
- if (!(sk->userlocks&SOCK_RCVBUF_LOCK))
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
tcp_fixup_rcvbuf(sk);
- if (!(sk->userlocks&SOCK_SNDBUF_LOCK))
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
tcp_fixup_sndbuf(sk);
maxwin = tcp_full_space(sk);
@@ -331,15 +331,16 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
* do not clamp window. Try to expand rcvbuf instead.
*/
if (ofo_win) {
- if (sk->rcvbuf < sysctl_tcp_rmem[2] &&
- !(sk->userlocks&SOCK_RCVBUF_LOCK) &&
+ if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
!tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
- sk->rcvbuf = min(atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
+ sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
+ sysctl_tcp_rmem[2]);
}
- if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
app_win += ofo_win;
- if (atomic_read(&sk->rmem_alloc) >= 2*sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
app_win >>= 1;
if (app_win > tp->ack.rcv_mss)
app_win -= tp->ack.rcv_mss;
@@ -778,9 +779,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* So, SACKs for already sent large segments will be lost.
* Not good, but alternative is to resegment the queue. */
- if (sk->route_caps&NETIF_F_TSO) {
- sk->route_caps &= ~NETIF_F_TSO;
- sk->no_largesend = 1;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ sk->sk_route_caps &= ~NETIF_F_TSO;
+ sk->sk_no_largesend = 1;
tp->mss_cache = tp->mss_cache_std;
}
@@ -1128,13 +1129,13 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_opt *tp)
* receiver _host_ is heavily congested (or buggy).
* Do processing similar to RTO timeout.
*/
- if ((skb = skb_peek(&sk->write_queue)) != NULL &&
+ if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
NET_INC_STATS_BH(TCPSACKReneging);
tcp_enter_loss(sk, 1);
tp->retransmits++;
- tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
+ tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
return 1;
}
@@ -1153,7 +1154,8 @@ static inline int tcp_skb_timedout(struct tcp_opt *tp, struct sk_buff *skb)
static inline int tcp_head_timedout(struct sock *sk, struct tcp_opt *tp)
{
- return tp->packets_out && tcp_skb_timedout(tp, skb_peek(&sk->write_queue));
+ return tp->packets_out &&
+ tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue));
}
/* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1840,7 +1842,7 @@ static int tcp_clean_rtx_queue(struct sock *sk)
int acked = 0;
__s32 seq_rtt = -1;
- while((skb = skb_peek(&sk->write_queue)) && (skb != tp->send_head)) {
+ while ((skb = skb_peek(&sk->sk_write_queue)) && skb != tp->send_head) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
__u8 sacked = scb->sacked;
@@ -2080,7 +2082,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
- sk->err_soft = 0;
+ sk->sk_err_soft = 0;
tp->rcv_tstamp = tcp_time_stamp;
prior_packets = tp->packets_out;
if (!prior_packets)
@@ -2107,7 +2109,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
}
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
- dst_confirm(sk->dst_cache);
+ dst_confirm(sk->sk_dst_cache);
return 1;
@@ -2339,21 +2341,21 @@ static inline int tcp_sequence(struct tcp_opt *tp, u32 seq, u32 end_seq)
static void tcp_reset(struct sock *sk)
{
/* We want the right error as BSD sees it (and indeed as we do). */
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_SYN_SENT:
- sk->err = ECONNREFUSED;
+ sk->sk_err = ECONNREFUSED;
break;
case TCP_CLOSE_WAIT:
- sk->err = EPIPE;
+ sk->sk_err = EPIPE;
break;
case TCP_CLOSE:
return;
default:
- sk->err = ECONNRESET;
+ sk->sk_err = ECONNRESET;
}
if (!sock_flag(sk, SOCK_DEAD))
- sk->error_report(sk);
+ sk->sk_error_report(sk);
tcp_done(sk);
}
@@ -2378,10 +2380,10 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
tcp_schedule_ack(tp);
- sk->shutdown |= RCV_SHUTDOWN;
+ sk->sk_shutdown |= RCV_SHUTDOWN;
sock_reset_flag(sk, SOCK_DONE);
- switch(sk->state) {
+ switch (sk->sk_state) {
case TCP_SYN_RECV:
case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */
@@ -2416,7 +2418,8 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
/* Only TCP_LISTEN and TCP_CLOSE are left, in these
* cases we should never reach this piece of code.
*/
- printk("tcp_fin: Impossible, sk->state=%d\n", sk->state);
+ printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
+ __FUNCTION__, sk->sk_state);
break;
};
@@ -2429,10 +2432,11 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
tcp_mem_reclaim(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
/* Do not send POLL_HUP for half duplex close. */
- if (sk->shutdown == SHUTDOWN_MASK || sk->state == TCP_CLOSE)
+ if (sk->sk_shutdown == SHUTDOWN_MASK ||
+ sk->sk_state == TCP_CLOSE)
sk_wake_async(sk, 1, POLL_HUP);
else
sk_wake_async(sk, 1, POLL_IN);
@@ -2650,7 +2654,7 @@ static void tcp_ofo_queue(struct sock *sk)
TCP_SKB_CB(skb)->end_seq);
__skb_unlink(skb, skb->list);
- __skb_queue_tail(&sk->receive_queue, skb);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if(skb->h.th->fin)
tcp_fin(skb, sk, skb->h.th);
@@ -2659,7 +2663,7 @@ static void tcp_ofo_queue(struct sock *sk)
static inline int tcp_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
- return (int)skb->truesize <= sk->forward_alloc ||
+ return (int)skb->truesize <= sk->sk_forward_alloc ||
tcp_mem_schedule(sk, skb->truesize, 1);
}
@@ -2714,13 +2718,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (eaten <= 0) {
queue_and_out:
if (eaten < 0 &&
- (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!tcp_rmem_schedule(sk, skb))) {
if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
goto drop;
}
tcp_set_owner_r(skb, sk);
- __skb_queue_tail(&sk->receive_queue, skb);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
}
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if(skb->len)
@@ -2746,7 +2750,7 @@ queue_and_out:
if (eaten > 0)
__kfree_skb(skb);
else if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
return;
}
@@ -2787,7 +2791,7 @@ drop:
TCP_ECN_check_ce(tp, skb);
- if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!tcp_rmem_schedule(sk, skb)) {
if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
goto drop;
@@ -3024,18 +3028,18 @@ static int tcp_prune_queue(struct sock *sk)
NET_INC_STATS_BH(PruneCalled);
- if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk, tp);
else if (tcp_memory_pressure)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
tcp_collapse_ofo_queue(sk);
- tcp_collapse(sk, sk->receive_queue.next,
- (struct sk_buff*)&sk->receive_queue,
+ tcp_collapse(sk, sk->sk_receive_queue.next,
+ (struct sk_buff*)&sk->sk_receive_queue,
tp->copied_seq, tp->rcv_nxt);
tcp_mem_reclaim(sk);
- if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
/* Collapsing did not help, destructive actions follow.
@@ -3057,7 +3061,7 @@ static int tcp_prune_queue(struct sock *sk)
tcp_mem_reclaim(sk);
}
- if(atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
/* If we are really being abused, tell the caller to silently
@@ -3081,7 +3085,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
struct tcp_opt *tp = tcp_sk(sk);
if (tp->ca_state == TCP_CA_Open &&
- sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
+ sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
/* Limited by application or receiver window. */
u32 win_used = max(tp->snd_cwnd_used, 2U);
if (win_used < tp->snd_cwnd) {
@@ -3105,7 +3109,7 @@ static void tcp_new_space(struct sock *sk)
struct tcp_opt *tp = tcp_sk(sk);
if (tp->packets_out < tp->snd_cwnd &&
- !(sk->userlocks&SOCK_SNDBUF_LOCK) &&
+ !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
!tcp_memory_pressure &&
atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
int sndmem = max_t(u32, tp->mss_clamp, tp->mss_cache) +
@@ -3113,12 +3117,12 @@ static void tcp_new_space(struct sock *sk)
demanded = max_t(unsigned int, tp->snd_cwnd,
tp->reordering + 1);
sndmem *= 2*demanded;
- if (sndmem > sk->sndbuf)
- sk->sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+ if (sndmem > sk->sk_sndbuf)
+ sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
- sk->write_space(sk);
+ sk->sk_write_space(sk);
}
static inline void tcp_check_space(struct sock *sk)
@@ -3127,7 +3131,8 @@ static inline void tcp_check_space(struct sock *sk)
if (tp->queue_shrunk) {
tp->queue_shrunk = 0;
- if (sk->socket && test_bit(SOCK_NOSPACE, &sk->socket->flags))
+ if (sk->sk_socket &&
+ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
tcp_new_space(sk);
}
}
@@ -3249,7 +3254,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
!sock_flag(sk, SOCK_URGINLINE) &&
tp->copied_seq != tp->rcv_nxt) {
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
tp->copied_seq++;
if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
__skb_unlink(skb, skb->list);
@@ -3285,7 +3290,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
BUG();
tp->urg_data = TCP_URG_VALID | tmp;
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk,0);
+ sk->sk_data_ready(sk, 0);
}
}
}
@@ -3483,14 +3488,14 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);
- if ((int)skb->truesize > sk->forward_alloc)
+ if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
NET_INC_STATS_BH(TCPHPHits);
/* Bulk data transfer: receiver */
__skb_pull(skb,tcp_header_len);
- __skb_queue_tail(&sk->receive_queue, skb);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_set_owner_r(skb, sk);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
}
@@ -3519,7 +3524,7 @@ no_ack:
if (eaten)
__kfree_skb(skb);
else
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
return 0;
}
}
@@ -3659,7 +3664,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
TCP_ECN_rcv_synack(tp, th);
if (tp->ecn_flags&TCP_ECN_OK)
- sk->no_largesend = 1;
+ sk->sk_no_largesend = 1;
tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -3715,7 +3720,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_set_state(sk, TCP_ESTABLISHED);
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sk_wake_async(sk, 0, POLL_OUT);
}
@@ -3787,7 +3792,7 @@ discard:
TCP_ECN_rcv_syn(tp, th);
if (tp->ecn_flags&TCP_ECN_OK)
- sk->no_largesend = 1;
+ sk->sk_no_largesend = 1;
tcp_sync_mss(sk, tp->pmtu_cookie);
tcp_initialize_rcv_mss(sk);
@@ -3840,7 +3845,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->saw_tstamp = 0;
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_CLOSE:
goto discard;
@@ -3928,20 +3933,20 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (th->ack) {
int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
- switch(sk->state) {
+ switch(sk->sk_state) {
case TCP_SYN_RECV:
if (acceptable) {
tp->copied_seq = tp->rcv_nxt;
mb();
tcp_set_state(sk, TCP_ESTABLISHED);
- sk->state_change(sk);
+ sk->sk_state_change(sk);
/* Note, that this wakeup is only for marginal
* crossed SYN case. Passively open sockets
- * are not waked up, because sk->sleep == NULL
- * and sk->socket == NULL.
+ * are not waked up, because sk->sk_sleep ==
+ * NULL and sk->sk_socket == NULL.
*/
- if (sk->socket) {
+ if (sk->sk_socket) {
sk_wake_async(sk,0,POLL_OUT);
}
@@ -3974,12 +3979,12 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
case TCP_FIN_WAIT1:
if (tp->snd_una == tp->write_seq) {
tcp_set_state(sk, TCP_FIN_WAIT2);
- sk->shutdown |= SEND_SHUTDOWN;
- dst_confirm(sk->dst_cache);
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ dst_confirm(sk->sk_dst_cache);
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
- sk->state_change(sk);
+ sk->sk_state_change(sk);
else {
int tmo;
@@ -4032,7 +4037,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tcp_urg(sk, skb, th);
/* step 7: process the segment text */
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_CLOSE_WAIT:
case TCP_CLOSING:
case TCP_LAST_ACK:
@@ -4044,7 +4049,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* RFC 1122 says we MUST send a reset.
* BSD 4.4 also does reset.
*/
- if (sk->shutdown & RCV_SHUTDOWN) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS_BH(TCPAbortOnData);
@@ -4060,7 +4065,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
/* tcp_data could move socket to TIME-WAIT */
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
}
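
Alongside the data fields, tcp_input.c renames the per-socket callback pointers (data_ready becomes sk_data_ready, state_change becomes sk_state_change, error_report becomes sk_error_report). The invocation idiom is unchanged: the protocol fills in the pointer and call sites fire it to wake sleepers. A self-contained sketch under hypothetical names, assuming nothing beyond what the hunks show:

    /* Illustrative only: the member names gain the sk_ prefix; the
     * indirect-call pattern at sites like tcp_data_queue() is the same. */
    struct cb_sock {
        void (*sk_data_ready)(struct cb_sock *sk, int bytes);
        void (*sk_state_change)(struct cb_sock *sk);
    };

    static void wake_readers(struct cb_sock *sk, int bytes)
    {
        if (sk->sk_data_ready)      /* e.g. set to a sock_def_readable-style default */
            sk->sk_data_ready(sk, bytes);
    }
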
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3f16c2a57aed..efc7e05f184c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -161,12 +161,12 @@ static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
struct tcp_bind_bucket *tb;
spin_lock(&head->lock);
- tb = (struct tcp_bind_bucket *)sk->prev;
- if ((child->bind_next = tb->owners) != NULL)
- tb->owners->bind_pprev = &child->bind_next;
+ tb = (struct tcp_bind_bucket *)sk->sk_prev;
+ if ((child->sk_bind_next = tb->owners) != NULL)
+ tb->owners->sk_bind_pprev = &child->sk_bind_next;
tb->owners = child;
- child->bind_pprev = &tb->owners;
- child->prev = (struct sock *)tb;
+ child->sk_bind_pprev = &tb->owners;
+ child->sk_prev = (struct sock *)tb;
spin_unlock(&head->lock);
}
@@ -181,25 +181,25 @@ void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
unsigned short snum)
{
inet_sk(sk)->num = snum;
- if ((sk->bind_next = tb->owners) != NULL)
- tb->owners->bind_pprev = &sk->bind_next;
+ if ((sk->sk_bind_next = tb->owners) != NULL)
+ tb->owners->sk_bind_pprev = &sk->sk_bind_next;
tb->owners = sk;
- sk->bind_pprev = &tb->owners;
- sk->prev = (struct sock *)tb;
+ sk->sk_bind_pprev = &tb->owners;
+ sk->sk_prev = (struct sock *)tb;
}
static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
{
struct inet_opt *inet = inet_sk(sk);
struct sock *sk2 = tb->owners;
- int sk_reuse = sk->reuse;
+ int reuse = sk->sk_reuse;
- for ( ; sk2; sk2 = sk2->bind_next) {
+ for (; sk2; sk2 = sk2->sk_bind_next) {
if (sk != sk2 &&
!ipv6_only_sock(sk2) &&
- sk->bound_dev_if == sk2->bound_dev_if) {
- if (!sk_reuse || !sk2->reuse ||
- sk2->state == TCP_LISTEN) {
+ sk->sk_bound_dev_if == sk2->sk_bound_dev_if) {
+ if (!reuse || !sk2->sk_reuse ||
+ sk2->sk_state == TCP_LISTEN) {
struct inet_opt *inet2 = inet_sk(sk2);
if (!inet2->rcv_saddr || !inet->rcv_saddr ||
inet2->rcv_saddr == inet->rcv_saddr)
@@ -262,9 +262,10 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
break;
}
if (tb && tb->owners) {
- if (sk->reuse > 1)
+ if (sk->sk_reuse > 1)
goto success;
- if (tb->fastreuse > 0 && sk->reuse && sk->state != TCP_LISTEN) {
+ if (tb->fastreuse > 0 &&
+ sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
goto success;
} else {
ret = 1;
@@ -276,16 +277,17 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
goto fail_unlock;
if (!tb->owners) {
- if (sk->reuse && sk->state != TCP_LISTEN)
+ if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
else
tb->fastreuse = 0;
- } else if (tb->fastreuse && (!sk->reuse || sk->state == TCP_LISTEN))
+ } else if (tb->fastreuse &&
+ (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0;
success:
- if (!sk->prev)
+ if (!sk->sk_prev)
tcp_bind_hash(sk, tb, snum);
- BUG_TRAP(sk->prev == (struct sock *)tb);
+ BUG_TRAP(sk->sk_prev == (struct sock *)tb);
ret = 0;
fail_unlock:
@@ -305,11 +307,11 @@ static void __tcp_put_port(struct sock *sk)
struct tcp_bind_bucket *tb;
spin_lock(&head->lock);
- tb = (struct tcp_bind_bucket *) sk->prev;
- if (sk->bind_next)
- sk->bind_next->bind_pprev = sk->bind_pprev;
- *(sk->bind_pprev) = sk->bind_next;
- sk->prev = NULL;
+ tb = (struct tcp_bind_bucket *)sk->sk_prev;
+ if (sk->sk_bind_next)
+ sk->sk_bind_next->sk_bind_pprev = sk->sk_bind_pprev;
+ *(sk->sk_bind_pprev) = sk->sk_bind_next;
+ sk->sk_prev = NULL;
inet->num = 0;
tcp_bucket_destroy(tb);
spin_unlock(&head->lock);
@@ -355,29 +357,29 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
struct sock **skp;
rwlock_t *lock;
- BUG_TRAP(!sk->pprev);
- if (listen_possible && sk->state == TCP_LISTEN) {
+ BUG_TRAP(!sk->sk_pprev);
+ if (listen_possible && sk->sk_state == TCP_LISTEN) {
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock;
tcp_listen_wlock();
} else {
- skp = &tcp_ehash[(sk->hashent = tcp_sk_hashfn(sk))].chain;
- lock = &tcp_ehash[sk->hashent].lock;
+ skp = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
+ lock = &tcp_ehash[sk->sk_hashent].lock;
write_lock(lock);
}
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock(lock);
- if (listen_possible && sk->state == TCP_LISTEN)
+ if (listen_possible && sk->sk_state == TCP_LISTEN)
wake_up(&tcp_lhash_wait);
}
static void tcp_v4_hash(struct sock *sk)
{
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
__tcp_v4_hash(sk, 1);
local_bh_enable();
@@ -388,30 +390,30 @@ void tcp_unhash(struct sock *sk)
{
rwlock_t *lock;
- if (!sk->pprev)
+ if (!sk->sk_pprev)
goto ende;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
local_bh_disable();
tcp_listen_wlock();
lock = &tcp_lhash_lock;
} else {
- struct tcp_ehash_bucket *head = &tcp_ehash[sk->hashent];
+ struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
lock = &head->lock;
write_lock_bh(&head->lock);
}
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
}
write_unlock_bh(lock);
ende:
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
wake_up(&tcp_lhash_wait);
}
@@ -428,20 +430,20 @@ static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr,
int score, hiscore;
hiscore=-1;
- for (; sk; sk = sk->next) {
+ for (; sk; sk = sk->sk_next) {
struct inet_opt *inet = inet_sk(sk);
if (inet->num == hnum && !ipv6_only_sock(sk)) {
__u32 rcv_saddr = inet->rcv_saddr;
- score = (sk->family == PF_INET ? 1 : 0);
+ score = (sk->sk_family == PF_INET ? 1 : 0);
if (rcv_saddr) {
if (rcv_saddr != daddr)
continue;
score+=2;
}
- if (sk->bound_dev_if) {
- if (sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score+=2;
}
@@ -467,10 +469,10 @@ inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
if (sk) {
struct inet_opt *inet = inet_sk(sk);
- if (inet->num == hnum && !sk->next &&
+ if (inet->num == hnum && !sk->sk_next &&
(!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
- (sk->family == PF_INET || !ipv6_only_sock(sk)) &&
- !sk->bound_dev_if)
+ (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
+ !sk->sk_bound_dev_if)
goto sherry_cache;
sk = __tcp_v4_lookup_listener(sk, daddr, hnum, dif);
}
@@ -502,13 +504,13 @@ static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
int hash = tcp_hashfn(daddr, hnum, saddr, sport);
head = &tcp_ehash[hash];
read_lock(&head->lock);
- for (sk = head->chain; sk; sk = sk->next) {
+ for (sk = head->chain; sk; sk = sk->sk_next) {
if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next)
+ for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->sk_next)
if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit;
out:
@@ -555,7 +557,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
struct inet_opt *inet = inet_sk(sk);
u32 daddr = inet->rcv_saddr;
u32 saddr = inet->daddr;
- int dif = sk->bound_dev_if;
+ int dif = sk->sk_bound_dev_if;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
@@ -567,7 +569,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
/* Check TIME-WAIT sockets first. */
for (skp = &(head + tcp_ehash_size)->chain; (sk2 = *skp) != NULL;
- skp = &sk2->next) {
+ skp = &sk2->sk_next) {
tw = (struct tcp_tw_bucket *)sk2;
if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
@@ -587,15 +589,15 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
fall back to VJ's scheme and use initial
timestamp retrieved from peer table.
*/
- if (tw->ts_recent_stamp &&
+ if (tw->tw_ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse &&
xtime.tv_sec -
- tw->ts_recent_stamp > 1))) {
+ tw->tw_ts_recent_stamp > 1))) {
if ((tp->write_seq =
- tw->snd_nxt + 65535 + 2) == 0)
+ tw->tw_snd_nxt + 65535 + 2) == 0)
tp->write_seq = 1;
- tp->ts_recent = tw->ts_recent;
- tp->ts_recent_stamp = tw->ts_recent_stamp;
+ tp->ts_recent = tw->tw_ts_recent;
+ tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
sock_hold(sk2);
skp = &head->chain;
goto unique;
@@ -606,7 +608,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
tw = NULL;
/* And established part... */
- for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->next) {
+ for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->sk_next) {
if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
goto not_unique;
}
@@ -616,14 +618,14 @@ unique:
* in hash table socket with a funny identity. */
inet->num = lport;
inet->sport = htons(lport);
- BUG_TRAP(!sk->pprev);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ BUG_TRAP(!sk->sk_pprev);
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sk->hashent = hash;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sk->sk_hashent = hash;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock(&head->lock);
if (twp) {
@@ -727,7 +729,7 @@ ok:
spin_unlock(&tcp_portalloc_lock);
tcp_bind_hash(sk, tb, rover);
- if (!sk->pprev) {
+ if (!sk->sk_pprev) {
inet_sk(sk)->sport = htons(rover);
__tcp_v4_hash(sk, 0);
}
@@ -743,9 +745,9 @@ ok:
}
head = &tcp_bhash[tcp_bhashfn(snum)];
- tb = (struct tcp_bind_bucket *)sk->prev;
+ tb = (struct tcp_bind_bucket *)sk->sk_prev;
spin_lock_bh(&head->lock);
- if (tb->owners == sk && !sk->bind_next) {
+ if (tb->owners == sk && !sk->sk_bind_next) {
__tcp_v4_hash(sk, 0);
spin_unlock_bh(&head->lock);
return 0;
@@ -784,7 +786,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
tmp = ip_route_connect(&rt, nexthop, inet->saddr,
- RT_CONN_FLAGS(sk), sk->bound_dev_if,
+ RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
inet->sport, usin->sin_port, sk);
if (tmp < 0)
@@ -871,7 +873,7 @@ failure:
/* This unhashes the socket and releases the local port, if necessary. */
tcp_set_state(sk, TCP_CLOSE);
ip_rt_put(rt);
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
inet->dport = 0;
return err;
}
@@ -943,7 +945,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
* send out by Linux are always <576bytes so they should go through
* unfragmented).
*/
- if (sk->state == TCP_LISTEN)
+ if (sk->sk_state == TCP_LISTEN)
return;
/* We don't check in the destentry if pmtu discovery is forbidden
@@ -961,7 +963,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
* for the case, if this connection will not able to recover.
*/
if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
- sk->err_soft = EMSGSIZE;
+ sk->sk_err_soft = EMSGSIZE;
mtu = dst_pmtu(dst);
@@ -1017,7 +1019,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
- if (sk->state == TCP_TIME_WAIT) {
+ if (sk->sk_state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket *)sk);
return;
}
@@ -1029,12 +1031,12 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LockDroppedIcmps);
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
goto out;
tp = tcp_sk(sk);
seq = ntohl(th->seq);
- if (sk->state != TCP_LISTEN &&
+ if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
NET_INC_STATS(OutOfWindowIcmps);
goto out;
@@ -1070,7 +1072,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
goto out;
}
- switch (sk->state) {
+ switch (sk->sk_state) {
struct open_request *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
@@ -1106,13 +1108,13 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
*/
if (!sock_owned_by_user(sk)) {
TCP_INC_STATS_BH(TcpAttemptFails);
- sk->err = err;
+ sk->sk_err = err;
- sk->error_report(sk);
+ sk->sk_error_report(sk);
tcp_done(sk);
} else {
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
goto out;
}
@@ -1135,10 +1137,10 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
} else { /* Only an error on timeout */
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
out:
@@ -1269,8 +1271,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
- tcp_v4_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
- tw->rcv_wnd >> tw->rcv_wscale, tw->ts_recent);
+ tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
+ tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
tcp_tw_put(tw);
}
@@ -1286,7 +1288,7 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
{
struct rtable *rt;
struct ip_options *opt = req->af.v4_req.opt;
- struct flowi fl = { .oif = sk->bound_dev_if,
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = ((opt && opt->srr) ?
opt->faddr :
@@ -1586,7 +1588,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
if (!newsk)
goto exit;
- newsk->dst_cache = dst;
+ newsk->sk_dst_cache = dst;
tcp_v4_setup_caps(newsk, dst);
newtp = tcp_sk(newsk);
@@ -1641,7 +1643,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
tcp_v4_iif(skb));
if (nsk) {
- if (nsk->state != TCP_TIME_WAIT) {
+ if (nsk->sk_state != TCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
@@ -1693,7 +1695,7 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
*/
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
- if (sk->state == TCP_ESTABLISHED) { /* Fast path */
+ if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
goto reset;
@@ -1704,7 +1706,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
goto csum_err;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v4_hnd_req(sk, skb);
if (!nsk)
goto discard;
@@ -1789,7 +1791,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
goto no_tcp_socket;
process:
- if (sk->state == TCP_TIME_WAIT)
+ if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
@@ -1870,8 +1872,8 @@ do_time_wait:
*/
static void __tcp_v4_rehash(struct sock *sk)
{
- sk->prot->unhash(sk);
- sk->prot->hash(sk);
+ sk->sk_prot->unhash(sk);
+ sk->sk_prot->hash(sk);
}
static int tcp_v4_reselect_saddr(struct sock *sk)
@@ -1888,8 +1890,8 @@ static int tcp_v4_reselect_saddr(struct sock *sk)
/* Query new route. */
err = ip_route_connect(&rt, daddr, 0,
- RT_TOS(inet->tos) | sk->localroute,
- sk->bound_dev_if,
+ RT_TOS(inet->tos) | sk->sk_localroute,
+ sk->sk_bound_dev_if,
IPPROTO_TCP,
inet->sport, inet->dport, sk);
if (err)
@@ -1941,7 +1943,7 @@ int tcp_v4_rebuild_header(struct sock *sk)
daddr = inet->opt->faddr;
{
- struct flowi fl = { .oif = sk->bound_dev_if,
+ struct flowi fl = { .oif = sk->sk_bound_dev_if,
.nl_u = { .ip4_u =
{ .daddr = daddr,
.saddr = inet->saddr,
@@ -1960,13 +1962,13 @@ int tcp_v4_rebuild_header(struct sock *sk)
}
/* Routing failed... */
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
if (!sysctl_ip_dynaddr ||
- sk->state != TCP_SYN_SENT ||
- (sk->userlocks & SOCK_BINDADDR_LOCK) ||
+ sk->sk_state != TCP_SYN_SENT ||
+ (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = tcp_v4_reselect_saddr(sk)) != 0)
- sk->err_soft=-err;
+ sk->sk_err_soft = -err;
return err;
}
@@ -2023,14 +2025,14 @@ int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
{
struct inet_peer *peer = NULL;
- peer = inet_getpeer(tw->daddr, 1);
+ peer = inet_getpeer(tw->tw_daddr, 1);
if (peer) {
- if ((s32)(peer->tcp_ts - tw->ts_recent) <= 0 ||
+ if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
(peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
- peer->tcp_ts_stamp <= tw->ts_recent_stamp)) {
- peer->tcp_ts_stamp = tw->ts_recent_stamp;
- peer->tcp_ts = tw->ts_recent;
+ peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
+ peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
+ peer->tcp_ts = tw->tw_ts_recent;
}
inet_putpeer(peer);
return 1;
@@ -2083,15 +2085,15 @@ static int tcp_v4_init_sock(struct sock *sk)
tp->reordering = sysctl_tcp_reordering;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
- sk->write_space = tcp_write_space;
- sk->use_write_queue = 1;
+ sk->sk_write_space = tcp_write_space;
+ sk->sk_use_write_queue = 1;
tp->af_specific = &ipv4_specific;
- sk->sndbuf = sysctl_tcp_wmem[1];
- sk->rcvbuf = sysctl_tcp_rmem[1];
+ sk->sk_sndbuf = sysctl_tcp_wmem[1];
+ sk->sk_rcvbuf = sysctl_tcp_rmem[1];
atomic_inc(&tcp_sockets_allocated);
@@ -2114,7 +2116,7 @@ static int tcp_v4_destroy_sock(struct sock *sk)
__skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */
- if (sk->prev)
+ if (sk->sk_prev)
tcp_put_port(sk);
/* If sendmsg cached page exists, toss it. */
@@ -2142,7 +2144,7 @@ static void *listening_get_first(struct seq_file *seq)
if (!sk)
continue;
++st->num;
- if (sk->family == st->family) {
+ if (sk->sk_family == st->family) {
rc = sk;
goto out;
}
@@ -2195,14 +2197,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
get_req:
req = tp->listen_opt->syn_table[st->sbucket];
}
- sk = st->syn_wait_sk->next;
+ sk = st->syn_wait_sk->sk_next;
st->state = TCP_SEQ_STATE_LISTENING;
read_unlock_bh(&tp->syn_wait_lock);
} else
- sk = sk->next;
+ sk = sk->sk_next;
get_sk:
while (sk) {
- if (sk->family == st->family) {
+ if (sk->sk_family == st->family) {
cur = sk;
goto out;
}
@@ -2216,7 +2218,7 @@ get_sk:
goto get_req;
}
read_unlock_bh(&tp->syn_wait_lock);
- sk = sk->next;
+ sk = sk->sk_next;
}
if (++st->bucket < TCP_LHTABLE_SIZE) {
sk = tcp_listening_hash[st->bucket];
@@ -2248,8 +2250,8 @@ static void *established_get_first(struct seq_file *seq)
read_lock(&tcp_ehash[st->bucket].lock);
for (sk = tcp_ehash[st->bucket].chain; sk;
- sk = sk->next, ++st->num) {
- if (sk->family != st->family)
+ sk = sk->sk_next, ++st->num) {
+ if (sk->sk_family != st->family)
continue;
rc = sk;
goto out;
@@ -2257,8 +2259,8 @@ static void *established_get_first(struct seq_file *seq)
st->state = TCP_SEQ_STATE_TIME_WAIT;
for (tw = (struct tcp_tw_bucket *)
tcp_ehash[st->bucket + tcp_ehash_size].chain;
- tw; tw = (struct tcp_tw_bucket *)tw->next, ++st->num) {
- if (tw->family != st->family)
+ tw; tw = (struct tcp_tw_bucket *)tw->tw_next, ++st->num) {
+ if (tw->tw_family != st->family)
continue;
rc = tw;
goto out;
@@ -2278,11 +2280,11 @@ static void *established_get_next(struct seq_file *seq, void *cur)
if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
tw = cur;
- tw = (struct tcp_tw_bucket *)tw->next;
+ tw = (struct tcp_tw_bucket *)tw->tw_next;
get_tw:
- while (tw && tw->family != st->family) {
+ while (tw && tw->tw_family != st->family) {
++st->num;
- tw = (struct tcp_tw_bucket *)tw->next;
+ tw = (struct tcp_tw_bucket *)tw->tw_next;
}
if (tw) {
cur = tw;
@@ -2298,11 +2300,11 @@ get_tw:
goto out;
}
} else
- sk = sk->next;
+ sk = sk->sk_next;
- while (sk && sk->family != st->family) {
+ while (sk && sk->sk_family != st->family) {
++st->num;
- sk = sk->next;
+ sk = sk->sk_next;
}
if (!sk) {
st->state = TCP_SEQ_STATE_TIME_WAIT;
@@ -2482,7 +2484,7 @@ static void get_openreq4(struct sock *sk, struct open_request *req,
uid,
0, /* non standard timer */
0, /* open_requests have no inode */
- atomic_read(&sk->refcnt),
+ atomic_read(&sk->sk_refcnt),
req);
}
@@ -2503,9 +2505,9 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
} else if (tp->pending == TCP_TIME_PROBE0) {
timer_active = 4;
timer_expires = tp->timeout;
- } else if (timer_pending(&sp->timer)) {
+ } else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
- timer_expires = sp->timer.expires;
+ timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
@@ -2513,14 +2515,14 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5d %8d %lu %d %p %u %u %u %u %d",
- i, src, srcp, dest, destp, sp->state,
+ i, src, srcp, dest, destp, sp->sk_state,
tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
timer_active, timer_expires - jiffies,
tp->retransmits,
sock_i_uid(sp),
tp->probes_out,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp,
+ atomic_read(&sp->sk_refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
tp->snd_cwnd,
tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
@@ -2530,21 +2532,21 @@ static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
{
unsigned int dest, src;
__u16 destp, srcp;
- int ttd = tw->ttd - jiffies;
+ int ttd = tw->tw_ttd - jiffies;
if (ttd < 0)
ttd = 0;
- dest = tw->daddr;
- src = tw->rcv_saddr;
- destp = ntohs(tw->dport);
- srcp = ntohs(tw->sport);
+ dest = tw->tw_daddr;
+ src = tw->tw_rcv_saddr;
+ destp = ntohs(tw->tw_dport);
+ srcp = ntohs(tw->tw_sport);
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p",
- i, src, srcp, dest, destp, tw->substate, 0, 0,
+ i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
3, ttd, 0, 0, 0, 0,
- atomic_read(&tw->refcnt), tw);
+ atomic_read(&tw->tw_refcnt), tw);
}
#define TMPSZ 150
@@ -2627,12 +2629,12 @@ void __init tcp_v4_init(struct net_proto_family *ops)
int err = sock_create(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
if (err < 0)
panic("Failed to create the TCP control socket.\n");
- tcp_socket->sk->allocation = GFP_ATOMIC;
+ tcp_socket->sk->sk_allocation = GFP_ATOMIC;
inet_sk(tcp_socket->sk)->uc_ttl = -1;
/* Unhash it so that IP input processing does not even
* see it, we do not wish this socket to see incoming
* packets.
*/
- tcp_socket->sk->prot->unhash(tcp_socket->sk);
+ tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ae762b5c5d7a..e0f6ea8075c1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -61,32 +61,33 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
struct tcp_bind_bucket *tb;
/* Unlink from established hashes. */
- ehead = &tcp_ehash[tw->hashent];
+ ehead = &tcp_ehash[tw->tw_hashent];
write_lock(&ehead->lock);
- if (!tw->pprev) {
+ if (!tw->tw_pprev) {
write_unlock(&ehead->lock);
return;
}
- if(tw->next)
- tw->next->pprev = tw->pprev;
- *(tw->pprev) = tw->next;
- tw->pprev = NULL;
+ if (tw->tw_next)
+ tw->tw_next->sk_pprev = tw->tw_pprev;
+ *(tw->tw_pprev) = tw->tw_next;
+ tw->tw_pprev = NULL;
write_unlock(&ehead->lock);
/* Disassociate with bind bucket. */
- bhead = &tcp_bhash[tcp_bhashfn(tw->num)];
+ bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
spin_lock(&bhead->lock);
- tb = tw->tb;
- if(tw->bind_next)
- tw->bind_next->bind_pprev = tw->bind_pprev;
- *(tw->bind_pprev) = tw->bind_next;
- tw->tb = NULL;
+ tb = tw->tw_tb;
+ if (tw->tw_bind_next)
+ tw->tw_bind_next->sk_bind_pprev = tw->tw_bind_pprev;
+ *(tw->tw_bind_pprev) = tw->tw_bind_next;
+ tw->tw_tb = NULL;
tcp_bucket_destroy(tb);
spin_unlock(&bhead->lock);
#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&tw->refcnt) != 1) {
- printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw, atomic_read(&tw->refcnt));
+ if (atomic_read(&tw->tw_refcnt) != 1) {
+ printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
+ atomic_read(&tw->tw_refcnt));
}
#endif
tcp_tw_put(tw);
@@ -128,33 +129,34 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
int paws_reject = 0;
tp.saw_tstamp = 0;
- if (th->doff > (sizeof(struct tcphdr)>>2) && tw->ts_recent_stamp) {
+ if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
tcp_parse_options(skb, &tp, 0);
if (tp.saw_tstamp) {
- tp.ts_recent = tw->ts_recent;
- tp.ts_recent_stamp = tw->ts_recent_stamp;
+ tp.ts_recent = tw->tw_ts_recent;
+ tp.ts_recent_stamp = tw->tw_ts_recent_stamp;
paws_reject = tcp_paws_check(&tp, th->rst);
}
}
- if (tw->substate == TCP_FIN_WAIT2) {
+ if (tw->tw_substate == TCP_FIN_WAIT2) {
/* Just repeat all the checks of tcp_rcv_state_process() */
/* Out of window, send ACK */
if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- tw->rcv_nxt, tw->rcv_nxt + tw->rcv_wnd))
+ tw->tw_rcv_nxt,
+ tw->tw_rcv_nxt + tw->tw_rcv_wnd))
return TCP_TW_ACK;
if (th->rst)
goto kill;
- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->rcv_nxt))
+ if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
goto kill_with_rst;
/* Dup ACK? */
- if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt) ||
+ if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
tcp_tw_put(tw);
return TCP_TW_SUCCESS;
@@ -163,7 +165,8 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
/* New data or FIN. If new data arrive after half-duplex close,
* reset.
*/
- if (!th->fin || TCP_SKB_CB(skb)->end_seq != tw->rcv_nxt+1) {
+ if (!th->fin ||
+ TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
kill_with_rst:
tcp_tw_deschedule(tw);
tcp_tw_put(tw);
@@ -171,11 +174,11 @@ kill_with_rst:
}
/* FIN arrived, enter true time-wait state. */
- tw->substate = TCP_TIME_WAIT;
- tw->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tw->tw_substate = TCP_TIME_WAIT;
+ tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp.saw_tstamp) {
- tw->ts_recent_stamp = xtime.tv_sec;
- tw->ts_recent = tp.rcv_tsval;
+ tw->tw_ts_recent_stamp = xtime.tv_sec;
+ tw->tw_ts_recent = tp.rcv_tsval;
}
/* I am shamed, but failed to make it more elegant.
@@ -183,10 +186,10 @@ kill_with_rst:
* to generalize to IPv6. Taking into account that IPv6
* does not understand recycling in any case, it is not
* a big problem in practice. --ANK */
- if (tw->family == AF_INET &&
- sysctl_tcp_tw_recycle && tw->ts_recent_stamp &&
+ if (tw->tw_family == AF_INET &&
+ sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
tcp_v4_tw_remember_stamp(tw))
- tcp_tw_schedule(tw, tw->timeout);
+ tcp_tw_schedule(tw, tw->tw_timeout);
else
tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
return TCP_TW_ACK;
@@ -210,7 +213,7 @@ kill_with_rst:
*/
if (!paws_reject &&
- (TCP_SKB_CB(skb)->seq == tw->rcv_nxt &&
+ (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
/* In window segment, it may be only reset or bare ack. */
@@ -229,8 +232,8 @@ kill:
tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
if (tp.saw_tstamp) {
- tw->ts_recent = tp.rcv_tsval;
- tw->ts_recent_stamp = xtime.tv_sec;
+ tw->tw_ts_recent = tp.rcv_tsval;
+ tw->tw_ts_recent_stamp = xtime.tv_sec;
}
tcp_tw_put(tw);
@@ -255,9 +258,9 @@ kill:
*/
if (th->syn && !th->rst && !th->ack && !paws_reject &&
- (after(TCP_SKB_CB(skb)->seq, tw->rcv_nxt) ||
- (tp.saw_tstamp && (s32)(tw->ts_recent - tp.rcv_tsval) < 0))) {
- u32 isn = tw->snd_nxt+65535+2;
+ (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
+ (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) {
+ u32 isn = tw->tw_snd_nxt + 65535 + 2;
if (isn == 0)
isn++;
TCP_SKB_CB(skb)->when = isn;
@@ -293,7 +296,7 @@ kill:
*/
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
- struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->hashent];
+ struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
struct tcp_bind_hashbucket *bhead;
struct sock **head, *sktw;
@@ -303,33 +306,33 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
*/
bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
spin_lock(&bhead->lock);
- tw->tb = (struct tcp_bind_bucket *)sk->prev;
- BUG_TRAP(sk->prev!=NULL);
- if ((tw->bind_next = tw->tb->owners) != NULL)
- tw->tb->owners->bind_pprev = &tw->bind_next;
- tw->tb->owners = (struct sock*)tw;
- tw->bind_pprev = &tw->tb->owners;
+ tw->tw_tb = (struct tcp_bind_bucket *)sk->sk_prev;
+ BUG_TRAP(sk->sk_prev);
+ if ((tw->tw_bind_next = tw->tw_tb->owners) != NULL)
+ tw->tw_tb->owners->sk_bind_pprev = &tw->tw_bind_next;
+ tw->tw_tb->owners = (struct sock *)tw;
+ tw->tw_bind_pprev = &tw->tw_tb->owners;
spin_unlock(&bhead->lock);
write_lock(&ehead->lock);
/* Step 2: Remove SK from established hash. */
- if (sk->pprev) {
- if(sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
}
/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
head = &(ehead + tcp_ehash_size)->chain;
sktw = (struct sock *)tw;
- if((sktw->next = *head) != NULL)
- (*head)->pprev = &sktw->next;
+ if ((sktw->sk_next = *head) != NULL)
+ (*head)->sk_pprev = &sktw->sk_next;
*head = sktw;
- sktw->pprev = head;
- atomic_inc(&tw->refcnt);
+ sktw->sk_pprev = head;
+ atomic_inc(&tw->tw_refcnt);
write_unlock(&ehead->lock);
}
@@ -354,33 +357,33 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
int rto = (tp->rto<<2) - (tp->rto>>1);
/* Give us an identity. */
- tw->daddr = inet->daddr;
- tw->rcv_saddr = inet->rcv_saddr;
- tw->bound_dev_if= sk->bound_dev_if;
- tw->num = inet->num;
- tw->state = TCP_TIME_WAIT;
- tw->substate = state;
- tw->sport = inet->sport;
- tw->dport = inet->dport;
- tw->family = sk->family;
- tw->reuse = sk->reuse;
- tw->rcv_wscale = tp->rcv_wscale;
- atomic_set(&tw->refcnt, 1);
-
- tw->hashent = sk->hashent;
- tw->rcv_nxt = tp->rcv_nxt;
- tw->snd_nxt = tp->snd_nxt;
- tw->rcv_wnd = tcp_receive_window(tp);
- tw->ts_recent = tp->ts_recent;
- tw->ts_recent_stamp= tp->ts_recent_stamp;
- tw->pprev_death = NULL;
+ tw->tw_daddr = inet->daddr;
+ tw->tw_rcv_saddr = inet->rcv_saddr;
+ tw->tw_bound_dev_if = sk->sk_bound_dev_if;
+ tw->tw_num = inet->num;
+ tw->tw_state = TCP_TIME_WAIT;
+ tw->tw_substate = state;
+ tw->tw_sport = inet->sport;
+ tw->tw_dport = inet->dport;
+ tw->tw_family = sk->sk_family;
+ tw->tw_reuse = sk->sk_reuse;
+ tw->tw_rcv_wscale = tp->rcv_wscale;
+ atomic_set(&tw->tw_refcnt, 1);
+
+ tw->tw_hashent = sk->sk_hashent;
+ tw->tw_rcv_nxt = tp->rcv_nxt;
+ tw->tw_snd_nxt = tp->snd_nxt;
+ tw->tw_rcv_wnd = tcp_receive_window(tp);
+ tw->tw_ts_recent = tp->ts_recent;
+ tw->tw_ts_recent_stamp = tp->ts_recent_stamp;
+ tw->tw_pprev_death = NULL;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- if(tw->family == PF_INET6) {
+ if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
- ipv6_addr_copy(&tw->v6_daddr, &np->daddr);
- ipv6_addr_copy(&tw->v6_rcv_saddr, &np->rcv_saddr);
+ ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
+ ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
}
#endif
/* Linkage updates. */
@@ -391,9 +394,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
timeo = rto;
if (recycle_ok) {
- tw->timeout = rto;
+ tw->tw_timeout = rto;
} else {
- tw->timeout = TCP_TIMEWAIT_LEN;
+ tw->tw_timeout = TCP_TIMEWAIT_LEN;
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
}
@@ -443,10 +446,10 @@ static void tcp_twkill(unsigned long dummy)
goto out;
while((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
- tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death;
- if (tw->next_death)
- tw->next_death->pprev_death = tw->pprev_death;
- tw->pprev_death = NULL;
+ tcp_tw_death_row[tcp_tw_death_row_slot] = tw->tw_next_death;
+ if (tw->tw_next_death)
+ tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
+ tw->tw_pprev_death = NULL;
spin_unlock(&tw_death_lock);
tcp_timewait_kill(tw);
@@ -474,11 +477,11 @@ out:
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
spin_lock(&tw_death_lock);
- if (tw->pprev_death) {
- if(tw->next_death)
- tw->next_death->pprev_death = tw->pprev_death;
- *tw->pprev_death = tw->next_death;
- tw->pprev_death = NULL;
+ if (tw->tw_pprev_death) {
+ if (tw->tw_next_death)
+ tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
+ *tw->tw_pprev_death = tw->tw_next_death;
+ tw->tw_pprev_death = NULL;
tcp_tw_put(tw);
if (--tcp_tw_count == 0)
del_timer(&tcp_tw_timer);
@@ -530,14 +533,14 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
spin_lock(&tw_death_lock);
/* Unlink it, if it was scheduled */
- if (tw->pprev_death) {
- if(tw->next_death)
- tw->next_death->pprev_death = tw->pprev_death;
- *tw->pprev_death = tw->next_death;
- tw->pprev_death = NULL;
+ if (tw->tw_pprev_death) {
+ if (tw->tw_next_death)
+ tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
+ *tw->tw_pprev_death = tw->tw_next_death;
+ tw->tw_pprev_death = NULL;
tcp_tw_count--;
} else
- atomic_inc(&tw->refcnt);
+ atomic_inc(&tw->tw_refcnt);
if (slot >= TCP_TW_RECYCLE_SLOTS) {
/* Schedule to slow timer */
@@ -548,11 +551,11 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
if (slot >= TCP_TWKILL_SLOTS)
slot = TCP_TWKILL_SLOTS-1;
}
- tw->ttd = jiffies + timeo;
+ tw->tw_ttd = jiffies + timeo;
slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
tpp = &tcp_tw_death_row[slot];
} else {
- tw->ttd = jiffies + (slot<<TCP_TW_RECYCLE_TICK);
+ tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
if (tcp_twcal_hand < 0) {
tcp_twcal_hand = 0;
@@ -567,10 +570,10 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
tpp = &tcp_twcal_row[slot];
}
- if((tw->next_death = *tpp) != NULL)
- (*tpp)->pprev_death = &tw->next_death;
+ if ((tw->tw_next_death = *tpp) != NULL)
+ (*tpp)->tw_pprev_death = &tw->tw_next_death;
*tpp = tw;
- tw->pprev_death = tpp;
+ tw->tw_pprev_death = tpp;
if (tcp_tw_count++ == 0)
mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
@@ -597,8 +600,8 @@ void tcp_twcal_tick(unsigned long dummy)
struct tcp_tw_bucket *tw;
while((tw = tcp_twcal_row[slot]) != NULL) {
- tcp_twcal_row[slot] = tw->next_death;
- tw->pprev_death = NULL;
+ tcp_twcal_row[slot] = tw->tw_next_death;
+ tw->tw_pprev_death = NULL;
tcp_timewait_kill(tw);
tcp_tw_put(tw);
@@ -639,18 +642,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
/* allocate the newsk from the same slab as the master sock;
* if not, at sk_free time we'll try to free it from the wrong
* slabcache (i.e. is it TCPv4 or v6?) -acme */
- struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->slab);
+ struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_slab);
if(newsk != NULL) {
struct tcp_opt *newtp;
struct sk_filter *filter;
memcpy(newsk, sk, sizeof(struct tcp_sock));
- newsk->state = TCP_SYN_RECV;
+ newsk->sk_state = TCP_SYN_RECV;
/* SANITY */
- newsk->pprev = NULL;
- newsk->prev = NULL;
+ newsk->sk_pprev = NULL;
+ newsk->sk_prev = NULL;
/* Clone the TCP header template */
inet_sk(newsk)->dport = req->rmt_port;
@@ -658,29 +661,29 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
sock_lock_init(newsk);
bh_lock_sock(newsk);
- newsk->dst_lock = RW_LOCK_UNLOCKED;
- atomic_set(&newsk->rmem_alloc, 0);
- skb_queue_head_init(&newsk->receive_queue);
- atomic_set(&newsk->wmem_alloc, 0);
- skb_queue_head_init(&newsk->write_queue);
- atomic_set(&newsk->omem_alloc, 0);
- newsk->wmem_queued = 0;
- newsk->forward_alloc = 0;
+ newsk->sk_dst_lock = RW_LOCK_UNLOCKED;
+ atomic_set(&newsk->sk_rmem_alloc, 0);
+ skb_queue_head_init(&newsk->sk_receive_queue);
+ atomic_set(&newsk->sk_wmem_alloc, 0);
+ skb_queue_head_init(&newsk->sk_write_queue);
+ atomic_set(&newsk->sk_omem_alloc, 0);
+ newsk->sk_wmem_queued = 0;
+ newsk->sk_forward_alloc = 0;
sock_reset_flag(newsk, SOCK_DONE);
- newsk->userlocks = sk->userlocks & ~SOCK_BINDPORT_LOCK;
- newsk->backlog.head = newsk->backlog.tail = NULL;
- newsk->callback_lock = RW_LOCK_UNLOCKED;
- skb_queue_head_init(&newsk->error_queue);
- newsk->write_space = tcp_write_space;
+ newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+ newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
+ skb_queue_head_init(&newsk->sk_error_queue);
+ newsk->sk_write_space = tcp_write_space;
- if ((filter = newsk->filter) != NULL)
+ if ((filter = newsk->sk_filter) != NULL)
sk_filter_charge(newsk, filter);
if (unlikely(xfrm_sk_clone_policy(newsk))) {
/* It is still a raw copy of the parent, so invalidate
* the destructor and do a plain sk_free() */
- newsk->destruct = NULL;
+ newsk->sk_destruct = NULL;
sk_free(newsk);
return NULL;
}
@@ -744,9 +747,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
/* Back to base struct sock members. */
- newsk->err = 0;
- newsk->priority = 0;
- atomic_set(&newsk->refcnt, 2);
+ newsk->sk_err = 0;
+ newsk->sk_priority = 0;
+ atomic_set(&newsk->sk_refcnt, 2);
#ifdef INET_REFCNT_DEBUG
atomic_inc(&inet_sock_nr);
#endif
@@ -755,9 +758,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
if (sock_flag(newsk, SOCK_KEEPOPEN))
tcp_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
- newsk->socket = NULL;
- newsk->sleep = NULL;
- newsk->owner = NULL;
+ newsk->sk_socket = NULL;
+ newsk->sk_sleep = NULL;
+ newsk->sk_owner = NULL;
newtp->tstamp_ok = req->tstamp_ok;
if((newtp->sack_ok = req->sack_ok) != 0) {
@@ -791,7 +794,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newtp->mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
if (newtp->ecn_flags&TCP_ECN_OK)
- newsk->no_largesend = 1;
+ newsk->sk_no_largesend = 1;
TCP_INC_STATS_BH(TcpPassiveOpens);
}
@@ -967,7 +970,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
if (child == NULL)
goto listen_overflow;
- sk_set_owner(child, sk->owner);
+ sk_set_owner(child, sk->sk_owner);
tcp_synq_unlink(tp, req, prev);
tcp_synq_removed(sk, req);
@@ -999,14 +1002,14 @@ int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb)
{
int ret = 0;
- int state = child->state;
+ int state = child->sk_state;
if (!sock_owned_by_user(child)) {
ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
/* Wakeup parent, send SIGIO */
- if (state == TCP_SYN_RECV && child->state != state)
- parent->data_ready(parent, 0);
+ if (state == TCP_SYN_RECV && child->sk_state != state)
+ parent->sk_data_ready(parent, 0);
} else {
/* Alas, it is possible again, because we do lookup
* in main socket hash table and lock on listening
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e1482b56deb9..f70553f3e2c5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -48,7 +48,7 @@ static __inline__
void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
tp->send_head = skb->next;
- if (tp->send_head == (struct sk_buff *) &sk->write_queue)
+ if (tp->send_head == (struct sk_buff *)&sk->sk_write_queue)
tp->send_head = NULL;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp->packets_out++ == 0)
@@ -309,13 +309,13 @@ void tcp_send_skb(struct sock *sk, struct sk_buff *skb, int force_queue, unsigne
/* Advance write_seq and place onto the write_queue. */
tp->write_seq = TCP_SKB_CB(skb)->end_seq;
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
tcp_charge_skb(sk, skb);
if (!force_queue && tp->send_head == NULL && tcp_snd_test(tp, skb, cur_mss, tp->nonagle)) {
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- if (tcp_transmit_skb(sk, skb_clone(skb, sk->allocation)) == 0) {
+ if (!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation))) {
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tcp_minshall_update(tp, cur_mss, skb);
if (tp->packets_out++ == 0)
@@ -339,7 +339,7 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
if (tcp_snd_test(tp, skb, cur_mss, 1)) {
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- if (tcp_transmit_skb(sk, skb_clone(skb, sk->allocation)) == 0) {
+ if (!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation))) {
tp->send_head = NULL;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp->packets_out++ == 0)
@@ -587,7 +587,7 @@ int tcp_sync_mss(struct sock *sk, u32 pmtu)
tp->pmtu_cookie = pmtu;
tp->mss_cache = tp->mss_cache_std = mss_now;
- if (sk->route_caps&NETIF_F_TSO) {
+ if (sk->sk_route_caps & NETIF_F_TSO) {
int large_mss;
large_mss = 65535 - tp->af_specific->net_header_len -
@@ -620,7 +620,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
* In time closedown will finish, we empty the write queue and all
* will be happy.
*/
- if(sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct sk_buff *skb;
int sent_pkts = 0;
@@ -886,16 +886,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
/* Do not send more than we queued. 1/4 is reserved for possible
* copying overhead: fragmentation, tunneling, mangling, etc.
*/
- if (atomic_read(&sk->wmem_alloc) > min(sk->wmem_queued+(sk->wmem_queued>>2),sk->sndbuf))
+ if (atomic_read(&sk->sk_wmem_alloc) >
+ min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
return -EAGAIN;
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
BUG();
- if (sk->route_caps&NETIF_F_TSO) {
- sk->route_caps &= ~NETIF_F_TSO;
- sk->no_largesend = 1;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ sk->sk_route_caps &= ~NETIF_F_TSO;
+ sk->sk_no_largesend = 1;
tp->mss_cache = tp->mss_cache_std;
}
@@ -924,7 +925,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
(skb->len < (cur_mss >> 1)) &&
(skb->next != tp->send_head) &&
- (skb->next != (struct sk_buff *)&sk->write_queue) &&
+ (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
(skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
(sysctl_tcp_retrans_collapse != 0))
tcp_retrans_try_collapse(sk, skb, cur_mss);
@@ -1013,7 +1014,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
else
NET_INC_STATS_BH(TCPSlowStartRetrans);
- if (skb == skb_peek(&sk->write_queue))
+ if (skb ==
+ skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
}
@@ -1059,7 +1061,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if(tcp_retransmit_skb(sk, skb))
break;
- if (skb == skb_peek(&sk->write_queue))
+ if (skb == skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
NET_INC_STATS_BH(TCPForwardRetrans);
@@ -1073,7 +1075,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
void tcp_send_fin(struct sock *sk)
{
struct tcp_opt *tp = tcp_sk(sk);
- struct sk_buff *skb = skb_peek_tail(&sk->write_queue);
+ struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
unsigned int mss_now;
/* Optimization, tack on the FIN if we have a queue of
@@ -1149,7 +1151,7 @@ int tcp_send_synack(struct sock *sk)
{
struct sk_buff* skb;
- skb = skb_peek(&sk->write_queue);
+ skb = skb_peek(&sk->sk_write_queue);
if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
return -EFAULT;
@@ -1159,8 +1161,8 @@ int tcp_send_synack(struct sock *sk)
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
if (nskb == NULL)
return -ENOMEM;
- __skb_unlink(skb, &sk->write_queue);
- __skb_queue_head(&sk->write_queue, nskb);
+ __skb_unlink(skb, &sk->sk_write_queue);
+ __skb_queue_head(&sk->sk_write_queue, nskb);
tcp_free_skb(sk, skb);
tcp_charge_skb(sk, nskb);
skb = nskb;
@@ -1275,7 +1277,7 @@ static inline void tcp_connect_init(struct sock *sk)
tp->rcv_ssthresh = tp->rcv_wnd;
- sk->err = 0;
+ sk->sk_err = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->snd_wnd = 0;
tcp_init_wl(tp, tp->write_seq, 0);
@@ -1300,7 +1302,7 @@ int tcp_connect(struct sock *sk)
tcp_connect_init(sk);
- buff = alloc_skb(MAX_TCP_HEADER + 15, sk->allocation);
+ buff = alloc_skb(MAX_TCP_HEADER + 15, sk->sk_allocation);
if (unlikely(buff == NULL))
return -ENOBUFS;
@@ -1319,7 +1321,7 @@ int tcp_connect(struct sock *sk)
/* Send it off. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
tp->retrans_stamp = TCP_SKB_CB(buff)->when;
- __skb_queue_tail(&sk->write_queue, buff);
+ __skb_queue_tail(&sk->sk_write_queue, buff);
tcp_charge_skb(sk, buff);
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
@@ -1388,7 +1390,7 @@ void tcp_send_delayed_ack(struct sock *sk)
void tcp_send_ack(struct sock *sk)
{
/* If we have been reset, we may not send again. */
- if(sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *buff;
@@ -1456,7 +1458,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
int tcp_write_wakeup(struct sock *sk)
{
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -1481,9 +1483,9 @@ int tcp_write_wakeup(struct sock *sk)
return -1;
/* SWS override triggered forced fragmentation.
* Disable TSO, the connection is too sick. */
- if (sk->route_caps&NETIF_F_TSO) {
- sk->no_largesend = 1;
- sk->route_caps &= ~NETIF_F_TSO;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ sk->sk_no_largesend = 1;
+ sk->sk_route_caps &= ~NETIF_F_TSO;
tp->mss_cache = tp->mss_cache_std;
}
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 18f5c2e2bb4a..0ed9f305232f 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -57,9 +57,9 @@ void tcp_init_xmit_timers(struct sock *sk)
tp->delack_timer.data = (unsigned long) sk;
tp->ack.pending = 0;
- init_timer(&sk->timer);
- sk->timer.function=&tcp_keepalive_timer;
- sk->timer.data = (unsigned long) sk;
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.function = &tcp_keepalive_timer;
+ sk->sk_timer.data = (unsigned long)sk;
}
void tcp_clear_xmit_timers(struct sock *sk)
@@ -77,14 +77,14 @@ void tcp_clear_xmit_timers(struct sock *sk)
del_timer(&tp->delack_timer))
__sock_put(sk);
- if(timer_pending(&sk->timer) && del_timer(&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
__sock_put(sk);
}
static void tcp_write_err(struct sock *sk)
{
- sk->err = sk->err_soft ? : ETIMEDOUT;
- sk->error_report(sk);
+ sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+ sk->sk_error_report(sk);
tcp_done(sk);
NET_INC_STATS_BH(TCPAbortOnTimeout);
@@ -112,11 +112,11 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
orphans <<= 1;
/* If some dubious ICMP arrived, penalize even more. */
- if (sk->err_soft)
+ if (sk->sk_err_soft)
orphans <<= 1;
if (orphans >= sysctl_tcp_max_orphans ||
- (sk->wmem_queued > SOCK_MIN_SNDBUF &&
+ (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
if (net_ratelimit())
printk(KERN_INFO "Out of socket memory\n");
@@ -142,7 +142,7 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
int retries = sysctl_tcp_orphan_retries; /* May be zero. */
/* We know from an ICMP that something is wrong. */
- if (sk->err_soft && !alive)
+ if (sk->sk_err_soft && !alive)
retries = 0;
/* However, if socket sent something recently, select some safe
@@ -159,9 +159,9 @@ static int tcp_write_timeout(struct sock *sk)
struct tcp_opt *tp = tcp_sk(sk);
int retry_until;
- if ((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV)) {
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (tp->retransmits)
- dst_negative_advice(&sk->dst_cache);
+ dst_negative_advice(&sk->sk_dst_cache);
retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries;
} else {
if (tp->retransmits >= sysctl_tcp_retries1) {
@@ -185,7 +185,7 @@ static int tcp_write_timeout(struct sock *sk)
Golden words :-).
*/
- dst_negative_advice(&sk->dst_cache);
+ dst_negative_advice(&sk->sk_dst_cache);
}
retry_until = sysctl_tcp_retries2;
@@ -224,7 +224,7 @@ static void tcp_delack_timer(unsigned long data)
tcp_mem_reclaim(sk);
- if (sk->state == TCP_CLOSE || !(tp->ack.pending&TCP_ACK_TIMER))
+ if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
goto out;
if ((long)(tp->ack.timeout - jiffies) > 0) {
@@ -241,7 +241,7 @@ static void tcp_delack_timer(unsigned long data)
skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
- sk->backlog_rcv(sk, skb);
+ sk->sk_backlog_rcv(sk, skb);
tp->ucopy.memory = 0;
}
@@ -325,10 +325,10 @@ static void tcp_retransmit_timer(struct sock *sk)
if (tp->packets_out == 0)
goto out;
- BUG_TRAP(!skb_queue_empty(&sk->write_queue));
+ BUG_TRAP(!skb_queue_empty(&sk->sk_write_queue));
if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
- !((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV))) {
+ !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
/* Receiver dastardly shrinks window. Our retransmits
* become zero probes, but we should not time out this
* connection. If the socket is an orphan, time it out,
@@ -347,7 +347,7 @@ static void tcp_retransmit_timer(struct sock *sk)
goto out;
}
tcp_enter_loss(sk, 0);
- tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
+ tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
__sk_dst_reset(sk);
goto out_reset_timer;
}
@@ -381,7 +381,7 @@ static void tcp_retransmit_timer(struct sock *sk)
tcp_enter_loss(sk, 0);
}
- if (tcp_retransmit_skb(sk, skb_peek(&sk->write_queue)) > 0) {
+ if (tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)) > 0) {
/* Retransmission failed because of local congestion,
* do not backoff.
*/
@@ -433,7 +433,7 @@ static void tcp_write_timer(unsigned long data)
goto out_unlock;
}
- if (sk->state == TCP_CLOSE || !tp->pending)
+ if (sk->sk_state == TCP_CLOSE || !tp->pending)
goto out;
if ((long)(tp->timeout - jiffies) > 0) {
@@ -556,19 +556,19 @@ static void tcp_synack_timer(struct sock *sk)
void tcp_delete_keepalive_timer (struct sock *sk)
{
- if (timer_pending(&sk->timer) && del_timer (&sk->timer))
+ if (timer_pending(&sk->sk_timer) && del_timer (&sk->sk_timer))
__sock_put(sk);
}
void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
{
- if (!mod_timer(&sk->timer, jiffies+len))
+ if (!mod_timer(&sk->sk_timer, jiffies + len))
sock_hold(sk);
}
void tcp_set_keepalive(struct sock *sk, int val)
{
- if ((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN))
+ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
return;
if (val && !sock_flag(sk, SOCK_KEEPOPEN))
@@ -592,12 +592,12 @@ static void tcp_keepalive_timer (unsigned long data)
goto out;
}
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
tcp_synack_timer(sk);
goto out;
}
- if (sk->state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
+ if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
if (tp->linger2 >= 0) {
int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;
@@ -610,7 +610,7 @@ static void tcp_keepalive_timer (unsigned long data)
goto death;
}
- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->state == TCP_CLOSE)
+ if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
goto out;
elapsed = keepalive_time_when(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 762de6d007fd..46f8b32c74c2 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -148,7 +148,7 @@ static int udp_v4_get_port(struct sock *sk, unsigned short snum)
do {
if (++size >= best_size_so_far)
goto next;
- } while ((sk2 = sk2->next) != NULL);
+ } while ((sk2 = sk2->sk_next) != NULL);
best_size_so_far = size;
best = result;
next:;
@@ -171,28 +171,28 @@ gotit:
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
- sk2 = sk2->next) {
+ sk2 = sk2->sk_next) {
struct inet_opt *inet2 = inet_sk(sk2);
if (inet2->num == snum &&
sk2 != sk &&
!ipv6_only_sock(sk2) &&
- sk2->bound_dev_if == sk->bound_dev_if &&
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
(!inet2->rcv_saddr ||
!inet->rcv_saddr ||
inet2->rcv_saddr == inet->rcv_saddr) &&
- (!sk2->reuse || !sk->reuse))
+ (!sk2->sk_reuse || !sk->sk_reuse))
goto fail;
}
}
inet->num = snum;
- if (sk->pprev == NULL) {
+ if (!sk->sk_pprev) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -211,13 +211,13 @@ static void udp_v4_hash(struct sock *sk)
static void udp_v4_unhash(struct sock *sk)
{
write_lock_bh(&udp_hash_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
inet_sk(sk)->num = 0;
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -232,11 +232,12 @@ struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport, u32 daddr, u16 dport, i
unsigned short hnum = ntohs(dport);
int badness = -1;
- for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
+ for (sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk;
+ sk = sk->sk_next) {
struct inet_opt *inet = inet_sk(sk);
if (inet->num == hnum && !ipv6_only_sock(sk)) {
- int score = (sk->family == PF_INET ? 1 : 0);
+ int score = (sk->sk_family == PF_INET ? 1 : 0);
if (inet->rcv_saddr) {
if (inet->rcv_saddr != daddr)
continue;
@@ -252,8 +253,8 @@ struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport, u32 daddr, u16 dport, i
continue;
score+=2;
}
- if(sk->bound_dev_if) {
- if(sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score+=2;
}
@@ -288,7 +289,8 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
{
struct sock *s = sk;
unsigned short hnum = ntohs(loc_port);
- for(; s; s = s->next) {
+
+ for (; s; s = s->sk_next) {
struct inet_opt *inet = inet_sk(s);
if (inet->num != hnum ||
@@ -296,7 +298,7 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
(inet->dport != rmt_port && inet->dport) ||
(inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
ipv6_only_sock(s) ||
- (s->bound_dev_if && s->bound_dev_if != dif))
+ (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
continue;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
continue;
@@ -370,13 +372,13 @@ void udp_err(struct sk_buff *skb, u32 info)
* 4.1.3.3.
*/
if (!inet->recverr) {
- if (!harderr || sk->state != TCP_ESTABLISHED)
+ if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else {
ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
}
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
out:
sock_put(sk);
}
@@ -404,7 +406,7 @@ static int udp_push_pending_frames(struct sock *sk, struct udp_opt *up)
int err = 0;
/* Grab the skbuff where UDP header space exists. */
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
/*
@@ -416,12 +418,12 @@ static int udp_push_pending_frames(struct sock *sk, struct udp_opt *up)
uh->len = htons(up->len);
uh->check = 0;
- if (sk->no_check == UDP_CSUM_NOXMIT) {
+ if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
skb->ip_summed = CHECKSUM_NONE;
goto send;
}
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
/*
* Only one fragment on the socket.
*/
@@ -454,7 +456,7 @@ static int udp_push_pending_frames(struct sock *sk, struct udp_opt *up)
sizeof(struct udphdr), skb->csum);
}
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
csum = csum_add(csum, skb->csum);
}
uh->check = csum_tcpudp_magic(up->saddr, up->daddr,
@@ -544,7 +546,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (dport == 0)
return -EINVAL;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
daddr = inet->daddr;
dport = inet->dport;
@@ -555,7 +557,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
ipc.addr = inet->saddr;
- ipc.oif = sk->bound_dev_if;
+ ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(msg, &ipc);
if (err)
@@ -577,7 +579,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
connected = 0;
}
tos = RT_TOS(inet->tos);
- if (sk->localroute || (msg->msg_flags&MSG_DONTROUTE) ||
+ if (sk->sk_localroute || (msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
@@ -701,7 +703,8 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, in
ret = ip_append_page(sk, page, offset, size, flags);
if (ret == -EOPNOTSUPP) {
release_sock(sk);
- return sock_no_sendpage(sk->socket, page, offset, size, flags);
+ return sock_no_sendpage(sk->sk_socket, page, offset,
+ size, flags);
}
if (ret < 0) {
udp_flush_pending_frames(sk);
@@ -728,7 +731,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
case SIOCOUTQ:
{
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
@@ -738,8 +741,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
unsigned long amount;
amount = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL) {
/*
* We will only return the amount
@@ -748,7 +751,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
*/
amount = skb->len - sizeof(struct udphdr);
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
@@ -844,12 +847,12 @@ csum_copy_err:
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- if (skb == skb_peek(&sk->receive_queue)) {
- __skb_unlink(skb, &sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ if (skb == skb_peek(&sk->sk_receive_queue)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
@@ -877,7 +880,7 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_dst_reset(sk);
- oif = sk->bound_dev_if;
+ oif = sk->sk_bound_dev_if;
saddr = inet->saddr;
if (MULTICAST(usin->sin_addr.s_addr)) {
if (!oif)
@@ -901,7 +904,7 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->rcv_saddr = rt->rt_src;
inet->daddr = rt->rt_dst;
inet->dport = usin->sin_port;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
inet->id = jiffies;
sk_dst_set(sk, &rt->u.dst);
@@ -915,15 +918,15 @@ int udp_disconnect(struct sock *sk, int flags)
* 1003.1g - break association.
*/
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
inet->daddr = 0;
inet->dport = 0;
- sk->bound_dev_if = 0;
- if (!(sk->userlocks & SOCK_BINDADDR_LOCK))
+ sk->sk_bound_dev_if = 0;
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
- if (!(sk->userlocks&SOCK_BINDPORT_LOCK)) {
- sk->prot->unhash(sk);
+ if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
+ sk->sk_prot->unhash(sk);
inet->sport = 0;
}
sk_dst_reset(sk);
@@ -1054,7 +1057,7 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
/* FALLTHROUGH -- it's a UDP Packet */
}
- if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (__udp_checksum_complete(skb)) {
UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
@@ -1094,7 +1097,7 @@ static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
do {
struct sk_buff *skb1 = skb;
- sknext = udp_v4_mcast_next(sk->next, uh->dest, daddr,
+ sknext = udp_v4_mcast_next(sk->sk_next, uh->dest, daddr,
uh->source, saddr, dif);
if(sknext)
skb1 = skb_clone(skb, GFP_ATOMIC);
@@ -1356,8 +1359,9 @@ static __inline__ struct sock *udp_get_bucket(struct seq_file *seq, loff_t *pos)
struct udp_iter_state *state = seq->private;
for (; state->bucket < UDP_HTABLE_SIZE; ++state->bucket)
- for (i = 0, sk = udp_hash[state->bucket]; sk; ++i, sk = sk->next) {
- if (sk->family != state->family)
+ for (i = 0, sk = udp_hash[state->bucket]; sk;
+ ++i, sk = sk->sk_next) {
+ if (sk->sk_family != state->family)
continue;
if (l--)
continue;
@@ -1387,12 +1391,11 @@ static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
state = seq->private;
sk = v;
- sk = sk->next;
+ sk = sk->sk_next;
- for (; sk; sk = sk->next) {
- if (sk->family == state->family)
+ for (; sk; sk = sk->sk_next)
+ if (sk->sk_family == state->family)
goto out;
- }
if (++state->bucket >= UDP_HTABLE_SIZE)
goto out;
@@ -1480,10 +1483,11 @@ static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
- bucket, src, srcp, dest, destp, sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ bucket, src, srcp, dest, destp, sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
}
static int udp4_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7605b74c4baa..b6b668c2ab38 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -144,9 +144,9 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
{
struct ipv6_pinfo *rc = (&((struct tcp6_sock *)sk)->inet6);
- if (sk->protocol == IPPROTO_UDP)
+ if (sk->sk_protocol == IPPROTO_UDP)
rc = (&((struct udp6_sock *)sk)->inet6);
- else if (sk->protocol == IPPROTO_RAW)
+ else if (sk->sk_protocol == IPPROTO_RAW)
rc = (&((struct raw6_sock *)sk)->inet6);
return rc;
}
@@ -198,10 +198,10 @@ static int inet6_create(struct socket *sock, int protocol)
sock_init_data(sock, sk);
sk_set_owner(sk, THIS_MODULE);
- sk->prot = answer->prot;
- sk->no_check = answer->no_check;
+ sk->sk_prot = answer->prot;
+ sk->sk_no_check = answer->no_check;
if (INET_PROTOSW_REUSE & answer->flags)
- sk->reuse = 1;
+ sk->sk_reuse = 1;
rcu_read_unlock();
inet = inet_sk(sk);
@@ -212,12 +212,12 @@ static int inet6_create(struct socket *sock, int protocol)
inet->hdrincl = 1;
}
- sk->destruct = inet6_sock_destruct;
- sk->zapped = 0;
- sk->family = PF_INET6;
- sk->protocol = protocol;
+ sk->sk_destruct = inet6_sock_destruct;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_INET6;
+ sk->sk_protocol = protocol;
- sk->backlog_rcv = answer->prot->backlog_rcv;
+ sk->sk_backlog_rcv = answer->prot->backlog_rcv;
tcp6sk = (struct tcp6_sock *)sk;
tcp6sk->pinet6 = np = inet6_sk_generic(sk);
@@ -253,10 +253,10 @@ static int inet6_create(struct socket *sock, int protocol)
* creation time automatically shares.
*/
inet->sport = ntohs(inet->num);
- sk->prot->hash(sk);
+ sk->sk_prot->hash(sk);
}
- if (sk->prot->init) {
- int err = sk->prot->init(sk);
+ if (sk->sk_prot->init) {
+ int err = sk->sk_prot->init(sk);
if (err != 0) {
inet_sock_release(sk);
return err;
@@ -293,8 +293,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
int addr_type = 0;
/* If the socket has its own bind function then use it. */
- if(sk->prot->bind)
- return sk->prot->bind(sk, uaddr, addr_len);
+ if (sk->sk_prot->bind)
+ return sk->sk_prot->bind(sk, uaddr, addr_len);
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
@@ -327,7 +327,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
lock_sock(sk);
/* Check these errors (active socket, double bind). */
- if (sk->state != TCP_CLOSE || inet->num) {
+ if (sk->sk_state != TCP_CLOSE || inet->num) {
release_sock(sk);
return -EINVAL;
}
@@ -338,11 +338,11 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Override any existing binding, if another one
* is supplied by user.
*/
- sk->bound_dev_if = addr->sin6_scope_id;
+ sk->sk_bound_dev_if = addr->sin6_scope_id;
}
/* Binding to link-local address requires an interface */
- if (sk->bound_dev_if == 0) {
+ if (!sk->sk_bound_dev_if) {
release_sock(sk);
return -EINVAL;
}
@@ -357,16 +357,16 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
/* Make sure we are allowed to bind here. */
- if (sk->prot->get_port(sk, snum) != 0) {
+ if (sk->sk_prot->get_port(sk, snum)) {
inet_reset_saddr(sk);
release_sock(sk);
return -EADDRINUSE;
}
if (addr_type != IPV6_ADDR_ANY)
- sk->userlocks |= SOCK_BINDADDR_LOCK;
+ sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
- sk->userlocks |= SOCK_BINDPORT_LOCK;
+ sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->sport = ntohs(inet->num);
inet->dport = 0;
inet->daddr = 0;
@@ -437,7 +437,8 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
if (peer) {
if (!inet->dport)
return -ENOTCONN;
- if (((1<<sk->state)&(TCPF_CLOSE|TCPF_SYN_SENT)) && peer == 1)
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
+ peer == 1)
return -ENOTCONN;
sin->sin6_port = inet->dport;
ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
@@ -452,7 +453,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
sin->sin6_port = inet->sport;
}
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- sin->sin6_scope_id = sk->bound_dev_if;
+ sin->sin6_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*sin);
return(0);
}
@@ -465,9 +466,9 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch(cmd)
{
case SIOCGSTAMP:
- if(sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- err = copy_to_user((void *)arg, &sk->stamp,
+ err = copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval));
if (err)
return -EFAULT;
@@ -485,7 +486,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCSIFDSTADDR:
return addrconf_set_dstaddr((void *) arg);
default:
- if(sk->prot->ioctl==0 || (err=sk->prot->ioctl(sk, cmd, arg))==-ENOIOCTLCMD)
+ if (!sk->sk_prot->ioctl ||
+ (err = sk->sk_prot->ioctl(sk, cmd, arg)) == -ENOIOCTLCMD)
return(dev_ioctl(cmd,(void *) arg));
return err;
}
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 0bd25c1710cc..6b2f90f02fbe 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -117,7 +117,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
int copied;
err = -EAGAIN;
- skb = skb_dequeue(&sk->error_queue);
+ skb = skb_dequeue(&sk->sk_error_queue);
if (skb == NULL)
goto out;
@@ -190,14 +190,14 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
err = copied;
/* Reset and regenerate socket error */
- spin_lock_irq(&sk->error_queue.lock);
- sk->err = 0;
- if ((skb2 = skb_peek(&sk->error_queue)) != NULL) {
- sk->err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_irq(&sk->error_queue.lock);
- sk->error_report(sk);
+ spin_lock_irq(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_irq(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
} else {
- spin_unlock_irq(&sk->error_queue.lock);
+ spin_unlock_irq(&sk->sk_error_queue.lock);
}
out_free_skb:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index d192b0d08356..887c2ea7b2dd 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -96,13 +96,13 @@ struct icmpv6_msg {
static __inline__ void icmpv6_xmit_lock(void)
{
local_bh_disable();
- if (unlikely(!spin_trylock(&icmpv6_socket->sk->lock.slock)))
+ if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock)))
BUG();
}
static __inline__ void icmpv6_xmit_unlock(void)
{
- spin_unlock_bh(&icmpv6_socket->sk->lock.slock);
+ spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
}
/*
@@ -213,14 +213,14 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hd
struct icmp6hdr *icmp6h;
int err = 0;
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
icmp6h = (struct icmp6hdr*) skb->h.raw;
memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
icmp6h->icmp6_cksum = 0;
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
skb->csum = csum_partial((char *)icmp6h,
sizeof(struct icmp6hdr), skb->csum);
icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
@@ -230,7 +230,7 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hd
} else {
u32 tmp_csum = 0;
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
}
@@ -519,7 +519,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
if ((sk = raw_v6_htable[hash]) != NULL) {
while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr))) {
rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
- sk = sk->next;
+ sk = sk->sk_next;
}
}
read_unlock(&raw_v6_lock);
@@ -687,9 +687,9 @@ int __init icmpv6_init(struct net_proto_family *ops)
}
sk = __icmpv6_socket[i]->sk;
- sk->allocation = GFP_ATOMIC;
- sk->sndbuf = SK_WMEM_MAX*2;
- sk->prot->unhash(sk);
+ sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_sndbuf = SK_WMEM_MAX * 2;
+ sk->sk_prot->unhash(sk);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 519ac97d90e7..99c5c5360c48 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -153,7 +153,7 @@ int ip6_route_me_harder(struct sk_buff *skb)
struct ipv6hdr *iph = skb->nh.ipv6h;
struct dst_entry *dst;
struct flowi fl = {
- .oif = skb->sk ? skb->sk->bound_dev_if : 0,
+ .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
.nl_u =
{ .ip6_u =
{ .daddr = iph->daddr,
@@ -457,7 +457,7 @@ static int ip6_frag_xmit(struct sock *sk, inet_getfrag_t getfrag,
struct frag_hdr *fhdr2;
- skb = skb_copy(last_skb, sk->allocation);
+ skb = skb_copy(last_skb, sk->sk_allocation);
if (skb == NULL) {
IP6_INC_STATS(Ip6FragFails);
@@ -1222,13 +1222,14 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offse
if (flags&MSG_PROBE)
return 0;
- if (skb_queue_empty(&sk->write_queue)) {
+ if (skb_queue_empty(&sk->sk_write_queue)) {
/*
* setup for corking
*/
if (opt) {
if (np->cork.opt == NULL)
- np->cork.opt = kmalloc(opt->tot_len, sk->allocation);
+ np->cork.opt = kmalloc(opt->tot_len,
+ sk->sk_allocation);
memcpy(np->cork.opt, opt, opt->tot_len);
inet->cork.flags |= IPCORK_OPT;
/* need source address above miyazawa */
@@ -1268,7 +1269,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offse
inet->cork.length += length;
- if ((skb = skb_peek_tail(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
goto alloc_new_skb;
while (length > 0) {
@@ -1295,10 +1296,11 @@ alloc_new_skb:
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
- if (atomic_read(&sk->wmem_alloc) <= 2*sk->sndbuf)
+ if (atomic_read(&sk->sk_wmem_alloc) <=
+ 2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
- sk->allocation);
+ sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
}
@@ -1335,7 +1337,7 @@ alloc_new_skb:
/*
* Put the packet on the pending queue
*/
- __skb_queue_tail(&sk->write_queue, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
@@ -1374,7 +1376,7 @@ alloc_new_skb:
} else if(i < MAX_SKB_FRAGS) {
if (copy > PAGE_SIZE)
copy = PAGE_SIZE;
- page = alloc_pages(sk->allocation, 0);
+ page = alloc_pages(sk->sk_allocation, 0);
if (page == NULL) {
err = -ENOMEM;
goto error;
@@ -1385,7 +1387,7 @@ alloc_new_skb:
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
skb->truesize += PAGE_SIZE;
- atomic_add(PAGE_SIZE, &sk->wmem_alloc);
+ atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
} else {
err = -EMSGSIZE;
goto error;
@@ -1423,14 +1425,14 @@ int ip6_push_pending_frames(struct sock *sk)
unsigned char proto = fl->proto;
int err = 0;
- if ((skb = __skb_dequeue(&sk->write_queue)) == NULL)
+ if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb->nh.raw)
__skb_pull(skb, skb->nh.raw - skb->data);
- while ((tmp_skb = __skb_dequeue(&sk->write_queue)) != NULL) {
+ while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
@@ -1496,7 +1498,7 @@ void ip6_flush_pending_frames(struct sock *sk)
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
- while ((skb = __skb_dequeue_tail(&sk->write_queue)) != NULL)
+ while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
inet->cork.flags &= ~IPCORK_OPT;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 681416c6045b..271cb34b316c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -81,7 +81,7 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
struct ip6_ra_chain *ra, *new_ra, **rap;
/* RA packet may be delivered ONLY to IPPROTO_RAW socket */
- if (sk->type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
+ if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
return -EINVAL;
new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
@@ -134,7 +134,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
int val, valbool;
int retv = -ENOPROTOOPT;
- if(level==SOL_IP && sk->type != SOCK_RAW)
+ if (level == SOL_IP && sk->sk_type != SOCK_RAW)
return udp_prot.setsockopt(sk, level, optname, optval, optlen);
if(level!=SOL_IPV6)
@@ -156,11 +156,11 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
struct ipv6_txoptions *opt;
struct sk_buff *pktopt;
- if (sk->protocol != IPPROTO_UDP &&
- sk->protocol != IPPROTO_TCP)
+ if (sk->sk_protocol != IPPROTO_UDP &&
+ sk->sk_protocol != IPPROTO_TCP)
break;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;
}
@@ -174,26 +174,26 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
fl6_free_socklist(sk);
ipv6_sock_mc_close(sk);
- if (sk->protocol == IPPROTO_TCP) {
+ if (sk->sk_protocol == IPPROTO_TCP) {
struct tcp_opt *tp = tcp_sk(sk);
local_bh_disable();
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
sock_prot_inc_use(&tcp_prot);
local_bh_enable();
- sk->prot = &tcp_prot;
+ sk->sk_prot = &tcp_prot;
tp->af_specific = &ipv4_specific;
- sk->socket->ops = &inet_stream_ops;
- sk->family = PF_INET;
+ sk->sk_socket->ops = &inet_stream_ops;
+ sk->sk_family = PF_INET;
tcp_sync_mss(sk, tp->pmtu_cookie);
} else {
local_bh_disable();
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
sock_prot_inc_use(&udp_prot);
local_bh_enable();
- sk->prot = &udp_prot;
- sk->socket->ops = &inet_dgram_ops;
- sk->family = PF_INET;
+ sk->sk_prot = &udp_prot;
+ sk->sk_socket->ops = &inet_dgram_ops;
+ sk->sk_family = PF_INET;
}
opt = xchg(&np->opt, NULL);
if (opt)
@@ -202,7 +202,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
if (pktopt)
kfree_skb(pktopt);
- sk->destruct = inet_sock_destruct;
+ sk->sk_destruct = inet_sock_destruct;
#ifdef INET_REFCNT_DEBUG
atomic_dec(&inet6_sock_nr);
#endif
@@ -264,7 +264,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
int junk;
fl.fl6_flowlabel = 0;
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
if (optlen == 0)
goto update;
@@ -295,10 +295,11 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
goto done;
update:
retv = 0;
- if (sk->type == SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
if (opt) {
struct tcp_opt *tp = tcp_sk(sk);
- if (!((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
+ if (!((1 << sk->sk_state) &
+ (TCPF_LISTEN | TCPF_CLOSE))
&& inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
tcp_sync_mss(sk, tp->pmtu_cookie);
@@ -307,9 +308,9 @@ update:
opt = xchg(&np->opt, opt);
sk_dst_reset(sk);
} else {
- write_lock(&sk->dst_lock);
+ write_lock(&sk->sk_dst_lock);
opt = xchg(&np->opt, opt);
- write_unlock(&sk->dst_lock);
+ write_unlock(&sk->sk_dst_lock);
sk_dst_reset(sk);
}
@@ -326,7 +327,7 @@ done:
break;
case IPV6_MULTICAST_HOPS:
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
@@ -340,9 +341,9 @@ done:
break;
case IPV6_MULTICAST_IF:
- if (sk->type == SOCK_STREAM)
+ if (sk->sk_type == SOCK_STREAM)
goto e_inval;
- if (sk->bound_dev_if && sk->bound_dev_if != val)
+ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
goto e_inval;
if (__dev_get_by_index(val) == NULL) {
@@ -488,7 +489,7 @@ done:
case IPV6_RECVERR:
np->recverr = valbool;
if (!val)
- skb_queue_purge(&sk->error_queue);
+ skb_queue_purge(&sk->sk_error_queue);
retv = 0;
break;
case IPV6_FLOWINFO_SEND:
@@ -528,7 +529,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, char *optval,
int len;
int val;
- if(level==SOL_IP && sk->type != SOCK_RAW)
+ if (level == SOL_IP && sk->sk_type != SOCK_RAW)
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
if(level!=SOL_IPV6)
return -ENOPROTOOPT;
@@ -536,12 +537,12 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, char *optval,
return -EFAULT;
switch (optname) {
case IPV6_ADDRFORM:
- if (sk->protocol != IPPROTO_UDP &&
- sk->protocol != IPPROTO_TCP)
+ if (sk->sk_protocol != IPPROTO_UDP &&
+ sk->sk_protocol != IPPROTO_TCP)
return -EINVAL;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
- val = sk->family;
+ val = sk->sk_family;
break;
case MCAST_MSFILTER:
{
@@ -564,7 +565,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, char *optval,
struct msghdr msg;
struct sk_buff *skb;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
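The IPV6_ADDRFORM hunks earlier in this file are the one place where the sk_ rename brushes real logic: a connected v6-mapped socket is demoted to plain IPv4 by swapping its protocol ops wholesale. A minimal sketch of the TCP branch with the renamed fields (assumes lock_sock(sk) is held, as in ipv6_setsockopt(); the IPv6 option and packet-option teardown between the swap and the destructor switch is elided):

/* Sketch: demote an established v6-mapped TCP socket to AF_INET.
 * sock_prot_{dec,inc}_use keep the per-protocol use counters
 * balanced; BHs are disabled around the per-CPU counter updates. */
static void addrform_demote_tcp(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);

	local_bh_disable();
	sock_prot_dec_use(sk->sk_prot);		/* leaving tcpv6_prot */
	sock_prot_inc_use(&tcp_prot);		/* entering tcp_prot  */
	local_bh_enable();

	sk->sk_prot        = &tcp_prot;
	tp->af_specific    = &ipv4_specific;
	sk->sk_socket->ops = &inet_stream_ops;
	sk->sk_family      = PF_INET;
	tcp_sync_mss(sk, tp->pmtu_cookie);	/* re-derive MSS for v4 */
	sk->sk_destruct    = inet_sock_destruct;
}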
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 23f3859b3d47..1eccc9aa0b1e 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2176,8 +2176,8 @@ int __init igmp6_init(struct net_proto_family *ops)
}
sk = igmp6_socket->sk;
- sk->allocation = GFP_ATOMIC;
- sk->prot->unhash(sk);
+ sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_prot->unhash(sk);
np = inet6_sk(sk);
np->hop_limit = 1;
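mcast.c here and ndisc.c just below get the same two-line treatment that every kernel-internal control socket receives in this patch; the pattern, sketched with the new names as in igmp6_init():

/* Sketch: setup for a kernel-internal control socket. */
sk = igmp6_socket->sk;
sk->sk_allocation = GFP_ATOMIC;	/* may allocate from timer/BH context */
sk->sk_prot->unhash(sk);	/* keep it out of userspace lookups   */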
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index e3144c93fdd7..c75510fa39a9 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1466,11 +1466,11 @@ int __init ndisc_init(struct net_proto_family *ops)
sk = ndisc_socket->sk;
np = inet6_sk(sk);
- sk->allocation = GFP_ATOMIC;
+ sk->sk_allocation = GFP_ATOMIC;
np->hop_limit = 255;
/* Do not loopback ndisc messages */
np->mc_loop = 0;
- sk->prot->unhash(sk);
+ sk->sk_prot->unhash(sk);
/*
* Initialize the neighbour table
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index ca38148a313b..a5833277c223 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -538,14 +538,14 @@ ipq_rcv_sk(struct sock *sk, int len)
if (down_trylock(&ipqnl_sem))
return;
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
ipq_rcv_skb(skb);
kfree_skb(skb);
}
up(&ipqnl_sem);
- } while (ipqnl && ipqnl->receive_queue.qlen);
+ } while (ipqnl && ipqnl->sk_receive_queue.qlen);
}
static int
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 649cb253da7e..c26b7f8c45dd 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -62,11 +62,11 @@ static void raw_v6_hash(struct sock *sk)
(RAWV6_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v6_lock);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
write_unlock_bh(&raw_v6_lock);
}
@@ -74,12 +74,12 @@ static void raw_v6_hash(struct sock *sk)
static void raw_v6_unhash(struct sock *sk)
{
write_lock_bh(&raw_v6_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
- sock_prot_dec_use(sk->prot);
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&raw_v6_lock);
@@ -93,7 +93,7 @@ struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
struct sock *s = sk;
int addr_type = ipv6_addr_type(loc_addr);
- for(s = sk; s; s = s->next) {
+ for (s = sk; s; s = s->sk_next) {
if (inet_sk(s)->num == num) {
struct ipv6_pinfo *np = inet6_sk(s);
@@ -176,7 +176,7 @@ void ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
if (clone)
rawv6_rcv(sk, clone);
}
- sk = __raw_v6_lookup(sk->next, nexthdr, daddr, saddr);
+ sk = __raw_v6_lookup(sk->sk_next, nexthdr, daddr, saddr);
}
out:
read_unlock(&raw_v6_lock);
@@ -203,7 +203,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
lock_sock(sk);
err = -EINVAL;
- if (sk->state != TCP_CLOSE)
+ if (sk->sk_state != TCP_CLOSE)
goto out;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
@@ -212,11 +212,11 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* Override any existing binding, if another one
* is supplied by user.
*/
- sk->bound_dev_if = addr->sin6_scope_id;
+ sk->sk_bound_dev_if = addr->sin6_scope_id;
}
/* Binding to link-local address requires an interface */
- if (sk->bound_dev_if == 0)
+ if (!sk->sk_bound_dev_if)
goto out;
}
@@ -257,7 +257,7 @@ void rawv6_err(struct sock *sk, struct sk_buff *skb,
2. Socket is connected (otherwise the error indication
is useless without recverr and error is hard.
*/
- if (!np->recverr && sk->state != TCP_ESTABLISHED)
+ if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
harderr = icmpv6_err_convert(type, code, &err);
@@ -272,14 +272,14 @@ void rawv6_err(struct sock *sk, struct sk_buff *skb,
}
if (np->recverr || harderr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
}
}
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
- if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
/* FIXME: increment a raw6 drops counter here */
kfree_skb(skb);
@@ -422,12 +422,12 @@ csum_copy_err:
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- if (skb == skb_peek(&sk->receive_queue)) {
- __skb_unlink(skb, &sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ if (skb == skb_peek(&sk->sk_receive_queue)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
@@ -446,7 +446,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct r
int err = 0;
u16 *csum;
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
if (opt->offset + 1 < len)
@@ -456,7 +456,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct r
goto out;
}
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
/*
* Only one fragment on the socket.
*/
@@ -467,7 +467,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct r
} else {
u32 tmp_csum = 0;
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
}
@@ -508,7 +508,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto error;
skb_reserve(skb, hh_len);
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->dst = dst_clone(&rt->u.dst);
skb->nh.ipv6h = iph = (struct ipv6hdr *)skb_put(skb, length);
@@ -597,8 +597,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
}
}
- /* Otherwise it will be difficult to maintain sk->dst_cache. */
- if (sk->state == TCP_ESTABLISHED &&
+ /*
+ * Otherwise it will be difficult to maintain
+ * sk->sk_dst_cache.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
!ipv6_addr_cmp(daddr, &np->daddr))
daddr = &np->daddr;
@@ -607,7 +610,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
fl.oif = sin6->sin6_scope_id;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return(-EINVAL);
proto = inet->num;
@@ -625,7 +628,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
}
if (fl.oif == 0)
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
@@ -857,7 +860,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
switch(cmd) {
case SIOCOUTQ:
{
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
case SIOCINQ:
@@ -865,11 +868,11 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
struct sk_buff *skb;
int amount = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->tail - skb->h.raw;
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
@@ -928,8 +931,8 @@ static struct sock *raw6_get_first(struct seq_file *seq)
for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket) {
sk = raw_v6_htable[state->bucket];
- while (sk && sk->family != PF_INET6)
- sk = sk->next;
+ while (sk && sk->sk_family != PF_INET6)
+ sk = sk->sk_next;
if (sk)
break;
}
@@ -941,10 +944,10 @@ static struct sock *raw6_get_next(struct seq_file *seq, struct sock *sk)
struct raw6_iter_state* state = raw6_seq_private(seq);
do {
- sk = sk->next;
+ sk = sk->sk_next;
try_again:
;
- } while (sk && sk->family != PF_INET6);
+ } while (sk && sk->sk_family != PF_INET6);
if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) {
sk = raw_v6_htable[state->bucket];
@@ -1003,12 +1006,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0,
sock_i_uid(sp), 0,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
}
static int raw6_seq_show(struct seq_file *seq, void *v)
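raw_v6_hash()/raw_v6_unhash() at the top of this file are the canonical pprev open-hash idiom that the patch renames to sk_next/sk_pprev. A self-contained sketch of both halves (the locking and reference counting of the original are omitted); sk_pprev points at whatever pointer currently points at sk, so removal needs neither the list head nor a walk:

static void hash_insert(struct sock **head, struct sock *sk)
{
	if ((sk->sk_next = *head) != NULL)
		(*head)->sk_pprev = &sk->sk_next;	/* old head */
	*head = sk;
	sk->sk_pprev = head;
}

static void hash_remove(struct sock *sk)
{
	if (sk->sk_pprev) {			/* hashed at all? */
		if (sk->sk_next)
			sk->sk_next->sk_pprev = sk->sk_pprev;
		*sk->sk_pprev = sk->sk_next;
		sk->sk_pprev = NULL;
	}
}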
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1881f0fca589..2907317946d6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,22 +101,23 @@ static inline int ipv6_rcv_saddr_equal(struct sock *sk, struct sock *sk2)
if (!inet_sk(sk2)->rcv_saddr && !ipv6_only_sock(sk))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
ipv6_addr_any(&inet6_sk(sk2)->rcv_saddr) &&
!(ipv6_only_sock(sk2) && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
(!ipv6_only_sock(sk) ||
- !(sk2->family == AF_INET6 ?
- ipv6_addr_type(&inet6_sk(sk2)->rcv_saddr) == IPV6_ADDR_MAPPED : 1)))
+ !(sk2->sk_family == AF_INET6 ?
+ (ipv6_addr_type(&inet6_sk(sk2)->rcv_saddr) == IPV6_ADDR_MAPPED) :
+ 1)))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
!ipv6_addr_cmp(&np->rcv_saddr,
- (sk2->state != TCP_TIME_WAIT ?
+ (sk2->sk_state != TCP_TIME_WAIT ?
&inet6_sk(sk2)->rcv_saddr :
- &((struct tcp_tw_bucket *)sk)->v6_rcv_saddr)))
+ &((struct tcp_tw_bucket *)sk)->tw_v6_rcv_saddr)))
return 1;
if (addr_type == IPV6_ADDR_MAPPED &&
@@ -135,10 +136,10 @@ static inline int tcp_v6_bind_conflict(struct sock *sk,
struct sock *sk2 = tb->owners;
/* We must walk the whole port owner list in this case. -DaveM */
- for (; sk2; sk2 = sk2->bind_next)
- if (sk != sk2 && sk->bound_dev_if == sk2->bound_dev_if &&
- (!sk->reuse || !sk2->reuse ||
- sk2->state == TCP_LISTEN) &&
+ for (; sk2; sk2 = sk2->sk_bind_next)
+ if (sk != sk2 && sk->sk_bound_dev_if == sk2->sk_bound_dev_if &&
+ (!sk->sk_reuse || !sk2->sk_reuse ||
+ sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
@@ -196,7 +197,8 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
break;
}
if (tb != NULL && tb->owners != NULL) {
- if (tb->fastreuse > 0 && sk->reuse != 0 && sk->state != TCP_LISTEN) {
+ if (tb->fastreuse > 0 && sk->sk_reuse &&
+ sk->sk_state != TCP_LISTEN) {
goto success;
} else {
ret = 1;
@@ -209,18 +211,18 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
(tb = tcp_bucket_create(head, snum)) == NULL)
goto fail_unlock;
if (tb->owners == NULL) {
- if (sk->reuse && sk->state != TCP_LISTEN)
+ if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
else
tb->fastreuse = 0;
} else if (tb->fastreuse &&
- ((sk->reuse == 0) || (sk->state == TCP_LISTEN)))
+ (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0;
success:
- if (!sk->prev)
+ if (!sk->sk_prev)
tcp_bind_hash(sk, tb, snum);
- BUG_TRAP(sk->prev == (struct sock *)tb);
+ BUG_TRAP(sk->sk_prev == (struct sock *)tb);
ret = 0;
fail_unlock:
@@ -235,30 +237,30 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
struct sock **skp;
rwlock_t *lock;
- BUG_TRAP(sk->pprev==NULL);
+ BUG_TRAP(!sk->sk_pprev);
- if(sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock;
tcp_listen_wlock();
} else {
- skp = &tcp_ehash[(sk->hashent = tcp_v6_sk_hashfn(sk))].chain;
- lock = &tcp_ehash[sk->hashent].lock;
+ skp = &tcp_ehash[(sk->sk_hashent = tcp_v6_sk_hashfn(sk))].chain;
+ lock = &tcp_ehash[sk->sk_hashent].lock;
write_lock(lock);
}
- if((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock(lock);
}
static void tcp_v6_hash(struct sock *sk)
{
- if(sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
struct tcp_opt *tp = tcp_sk(sk);
if (tp->af_specific == &ipv6_mapped) {
@@ -280,8 +282,8 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
hiscore=0;
read_lock(&tcp_lhash_lock);
sk = tcp_listening_hash[tcp_lhashfn(hnum)];
- for(; sk; sk = sk->next) {
- if (inet_sk(sk)->num == hnum && sk->family == PF_INET6) {
+ for (; sk; sk = sk->sk_next) {
+ if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
score = 1;
@@ -290,8 +292,8 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
continue;
score++;
}
- if (sk->bound_dev_if) {
- if (sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score++;
}
@@ -332,21 +334,21 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
head = &tcp_ehash[hash];
read_lock(&head->lock);
- for(sk = head->chain; sk; sk = sk->next) {
+ for (sk = head->chain; sk; sk = sk->sk_next) {
/* For IPV6 do the cheaper port and family tests first. */
if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- for(sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next) {
+ for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->sk_next) {
/* FIXME: acme: check this... */
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
- if(*((__u32 *)&(tw->dport)) == ports &&
- sk->family == PF_INET6) {
- if(!ipv6_addr_cmp(&tw->v6_daddr, saddr) &&
- !ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
- (!sk->bound_dev_if || sk->bound_dev_if == dif))
+ if(*((__u32 *)&(tw->tw_dport)) == ports &&
+ sk->sk_family == PF_INET6) {
+ if(!ipv6_addr_cmp(&tw->tw_v6_daddr, saddr) &&
+ !ipv6_addr_cmp(&tw->tw_v6_rcv_saddr, daddr) &&
+ (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
goto hit;
}
}
@@ -468,7 +470,7 @@ static int tcp_v6_check_established(struct sock *sk)
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *daddr = &np->rcv_saddr;
struct in6_addr *saddr = &np->daddr;
- int dif = sk->bound_dev_if;
+ int dif = sk->sk_bound_dev_if;
u32 ports = TCP_COMBINED_PORTS(inet->dport, inet->num);
int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
struct tcp_ehash_bucket *head = &tcp_ehash[hash];
@@ -477,22 +479,24 @@ static int tcp_v6_check_established(struct sock *sk)
write_lock_bh(&head->lock);
- for(skp = &(head + tcp_ehash_size)->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
+ for (skp = &(head + tcp_ehash_size)->chain; (sk2 = *skp) != NULL;
+ skp = &sk2->sk_next) {
tw = (struct tcp_tw_bucket*)sk2;
- if(*((__u32 *)&(tw->dport)) == ports &&
- sk2->family == PF_INET6 &&
- !ipv6_addr_cmp(&tw->v6_daddr, saddr) &&
- !ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
- sk2->bound_dev_if == sk->bound_dev_if) {
+ if(*((__u32 *)&(tw->tw_dport)) == ports &&
+ sk2->sk_family == PF_INET6 &&
+ !ipv6_addr_cmp(&tw->tw_v6_daddr, saddr) &&
+ !ipv6_addr_cmp(&tw->tw_v6_rcv_saddr, daddr) &&
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
struct tcp_opt *tp = tcp_sk(sk);
- if (tw->ts_recent_stamp) {
+ if (tw->tw_ts_recent_stamp) {
/* See comment in tcp_ipv4.c */
- if ((tp->write_seq = tw->snd_nxt+65535+2) == 0)
+ tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
+ if (!tp->write_seq)
tp->write_seq = 1;
- tp->ts_recent = tw->ts_recent;
- tp->ts_recent_stamp = tw->ts_recent_stamp;
+ tp->ts_recent = tw->tw_ts_recent;
+ tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
sock_hold(sk2);
skp = &head->chain;
goto unique;
@@ -502,20 +506,20 @@ static int tcp_v6_check_established(struct sock *sk)
}
tw = NULL;
- for(skp = &head->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
+ for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->sk_next) {
if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
goto not_unique;
}
unique:
- BUG_TRAP(sk->pprev==NULL);
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ BUG_TRAP(!sk->sk_pprev);
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sk->hashent = hash;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sk->sk_hashent = hash;
+ sock_prot_inc_use(sk->sk_prot);
write_unlock_bh(&head->lock);
if (tw) {
@@ -552,7 +556,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
spin_lock_bh(&head->lock);
- if (tb->owners == sk && sk->bind_next == NULL) {
+ if (tb->owners == sk && !sk->sk_bind_next) {
__tcp_v6_hash(sk);
spin_unlock_bh(&head->lock);
return 0;
@@ -621,15 +625,15 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
/* If interface is set while binding, indices
* must coincide.
*/
- if (sk->bound_dev_if &&
- sk->bound_dev_if != usin->sin6_scope_id)
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != usin->sin6_scope_id)
return -EINVAL;
- sk->bound_dev_if = usin->sin6_scope_id;
+ sk->sk_bound_dev_if = usin->sin6_scope_id;
}
/* Connect to link-local address requires an interface */
- if (sk->bound_dev_if == 0)
+ if (!sk->sk_bound_dev_if)
return -EINVAL;
}
@@ -661,14 +665,14 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
tp->af_specific = &ipv6_mapped;
- sk->backlog_rcv = tcp_v4_do_rcv;
+ sk->sk_backlog_rcv = tcp_v4_do_rcv;
err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
tp->ext_header_len = exthdrlen;
tp->af_specific = &ipv6_specific;
- sk->backlog_rcv = tcp_v6_do_rcv;
+ sk->sk_backlog_rcv = tcp_v6_do_rcv;
goto failure;
} else {
ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
@@ -687,7 +691,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src,
(saddr ? saddr : &np->saddr));
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = usin->sin6_port;
fl.fl_ip_sport = inet->sport;
@@ -704,7 +708,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
}
ip6_dst_store(sk, dst, NULL);
- sk->route_caps = dst->dev->features&~(NETIF_F_IP_CSUM|NETIF_F_TSO);
+ sk->sk_route_caps = dst->dev->features &
+ ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
if (saddr == NULL) {
err = ipv6_get_saddr(dst, &np->daddr, &saddr_buf);
@@ -748,7 +753,7 @@ late_failure:
failure:
__sk_dst_reset(sk);
inet->dport = 0;
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
return err;
}
@@ -770,7 +775,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
}
- if (sk->state == TCP_TIME_WAIT) {
+ if (sk->sk_state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket*)sk);
return;
}
@@ -779,12 +784,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LockDroppedIcmps);
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
goto out;
tp = tcp_sk(sk);
seq = ntohl(th->seq);
- if (sk->state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) {
+ if (sk->sk_state != TCP_LISTEN &&
+ !between(seq, tp->snd_una, tp->snd_nxt)) {
NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
@@ -796,7 +802,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sock_owned_by_user(sk))
goto out;
- if ((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
goto out;
/* icmp should have updated the destination cache entry */
@@ -814,7 +820,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
fl.proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet->dport;
fl.fl_ip_sport = inet->sport;
@@ -823,7 +829,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
dst_hold(dst);
if (dst->error) {
- sk->err_soft = -dst->error;
+ sk->sk_err_soft = -dst->error;
} else if (tp->pmtu_cookie > dst_pmtu(dst)) {
tcp_sync_mss(sk, dst_pmtu(dst));
tcp_simple_retransmit(sk);
@@ -835,7 +841,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
icmpv6_err_convert(type, code, &err);
/* Might be for an open_request */
- switch (sk->state) {
+ switch (sk->sk_state) {
struct open_request *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
@@ -864,22 +870,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
It can, if SYNs are crossed. --ANK */
if (!sock_owned_by_user(sk)) {
TCP_INC_STATS_BH(TcpAttemptFails);
- sk->err = err;
- sk->error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
+ sk->sk_err = err;
+ sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
tcp_done(sk);
- } else {
- sk->err_soft = err;
- }
+ } else
+ sk->sk_err_soft = err;
goto out;
}
if (!sock_owned_by_user(sk) && np->recverr) {
- sk->err = err;
- sk->error_report(sk);
- } else {
- sk->err_soft = err;
- }
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+ } else
+ sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
@@ -1128,8 +1132,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
- tcp_v6_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
- tw->rcv_wnd>>tw->rcv_wscale, tw->ts_recent);
+ tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
+ tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
tcp_tw_put(tw);
}
@@ -1160,7 +1164,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
tcp_v6_iif(skb));
if (nsk) {
- if (nsk->state != TCP_TIME_WAIT) {
+ if (nsk->sk_state != TCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
@@ -1247,10 +1251,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
atomic_inc(&skb->users);
req->af.v6_req.pktopts = skb;
}
- req->af.v6_req.iif = sk->bound_dev_if;
+ req->af.v6_req.iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
- if (!sk->bound_dev_if && ipv6_addr_type(&req->af.v6_req.rmt_addr)&IPV6_ADDR_LINKLOCAL)
+ if (!sk->sk_bound_dev_if &&
+ ipv6_addr_type(&req->af.v6_req.rmt_addr) & IPV6_ADDR_LINKLOCAL)
req->af.v6_req.iif = tcp_v6_iif(skb);
if (isn == 0)
@@ -1312,7 +1317,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
newtp->af_specific = &ipv6_mapped;
- newsk->backlog_rcv = tcp_v4_do_rcv;
+ newsk->sk_backlog_rcv = tcp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = tcp_v6_iif(skb);
@@ -1357,7 +1362,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
}
ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = req->rmt_port;
fl.fl_ip_sport = inet_sk(sk)->sport;
@@ -1377,7 +1382,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
#endif
ip6_dst_store(newsk, dst, NULL);
- sk->route_caps = dst->dev->features&~(NETIF_F_IP_CSUM|NETIF_F_TSO);
+ sk->sk_route_caps = dst->dev->features &
+ ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
newtcp6sk = (struct tcp6_sock *)newsk;
newtcp6sk->pinet6 = &newtcp6sk->inet6;
@@ -1391,7 +1397,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
ipv6_addr_copy(&newnp->daddr, &req->af.v6_req.rmt_addr);
ipv6_addr_copy(&newnp->saddr, &req->af.v6_req.loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &req->af.v6_req.loc_addr);
- newsk->bound_dev_if = req->af.v6_req.iif;
+ newsk->sk_bound_dev_if = req->af.v6_req.iif;
/* Now IPv6 options...
@@ -1524,7 +1530,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (np->rxopt.all)
opt_skb = skb_clone(skb, GFP_ATOMIC);
- if (sk->state == TCP_ESTABLISHED) { /* Fast path */
+ if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
goto reset;
@@ -1537,7 +1543,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
goto csum_err;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v6_hnd_req(sk, skb);
if (!nsk)
goto discard;
@@ -1586,7 +1592,7 @@ ipv6_pktoptions:
*/
tp = tcp_sk(sk);
if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
- !((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN))) {
+ !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
if (np->rxopt.bits.rxinfo)
np->mcast_oif = tcp_v6_iif(opt_skb);
if (np->rxopt.bits.rxhlim)
@@ -1650,7 +1656,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
goto no_tcp_socket;
process:
- if(sk->state == TCP_TIME_WAIT)
+ if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
@@ -1749,7 +1755,7 @@ static int tcp_v6_rebuild_header(struct sock *sk)
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl6_flowlabel = np->flow_label;
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet->dport;
fl.fl_ip_sport = inet->sport;
@@ -1763,12 +1769,13 @@ static int tcp_v6_rebuild_header(struct sock *sk)
if (dst->error) {
err = dst->error;
dst_release(dst);
- sk->route_caps = 0;
+ sk->sk_route_caps = 0;
return err;
}
ip6_dst_store(sk, dst, NULL);
- sk->route_caps = dst->dev->features&~(NETIF_F_IP_CSUM|NETIF_F_TSO);
+ sk->sk_route_caps = dst->dev->features &
+ ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
}
return 0;
@@ -1788,7 +1795,7 @@ static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl6_flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_sport = inet->sport;
fl.fl_ip_dport = inet->dport;
@@ -1803,9 +1810,9 @@ static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
dst = ip6_route_output(sk, &fl);
if (dst->error) {
- sk->err_soft = -dst->error;
+ sk->sk_err_soft = -dst->error;
dst_release(dst);
- return -sk->err_soft;
+ return -sk->sk_err_soft;
}
ip6_dst_store(sk, dst, NULL);
@@ -1830,8 +1837,9 @@ static void v6_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
- if (sk->bound_dev_if && ipv6_addr_type(&sin6->sin6_addr)&IPV6_ADDR_LINKLOCAL)
- sin6->sin6_scope_id = sk->bound_dev_if;
+ if (sk->sk_bound_dev_if &&
+ ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ sin6->sin6_scope_id = sk->sk_bound_dev_if;
}
static int tcp_v6_remember_stamp(struct sock *sk)
@@ -1906,15 +1914,15 @@ static int tcp_v6_init_sock(struct sock *sk)
tp->reordering = sysctl_tcp_reordering;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
tp->af_specific = &ipv6_specific;
- sk->write_space = tcp_write_space;
- sk->use_write_queue = 1;
+ sk->sk_write_space = tcp_write_space;
+ sk->sk_use_write_queue = 1;
- sk->sndbuf = sysctl_tcp_wmem[1];
- sk->rcvbuf = sysctl_tcp_rmem[1];
+ sk->sk_sndbuf = sysctl_tcp_wmem[1];
+ sk->sk_rcvbuf = sysctl_tcp_rmem[1];
atomic_inc(&tcp_sockets_allocated);
@@ -1938,7 +1946,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
__skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */
- if(sk->prev != NULL)
+ if (sk->sk_prev)
tcp_put_port(sk);
/* If sendmsg cached page exists, toss it. */
@@ -2003,9 +2011,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
} else if (tp->pending == TCP_TIME_PROBE0) {
timer_active = 4;
timer_expires = tp->timeout;
- } else if (timer_pending(&sp->timer)) {
+ } else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
- timer_expires = sp->timer.expires;
+ timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
@@ -2019,14 +2027,14 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- sp->state,
+ sp->sk_state,
tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
timer_active, timer_expires-jiffies,
tp->retransmits,
sock_i_uid(sp),
tp->probes_out,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp,
+ atomic_read(&sp->sk_refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
);
@@ -2037,15 +2045,15 @@ static void get_timewait6_sock(struct seq_file *seq,
{
struct in6_addr *dest, *src;
__u16 destp, srcp;
- int ttd = tw->ttd - jiffies;
+ int ttd = tw->tw_ttd - jiffies;
if (ttd < 0)
ttd = 0;
- dest = &tw->v6_daddr;
- src = &tw->v6_rcv_saddr;
- destp = ntohs(tw->dport);
- srcp = ntohs(tw->sport);
+ dest = &tw->tw_v6_daddr;
+ src = &tw->tw_v6_rcv_saddr;
+ destp = ntohs(tw->tw_dport);
+ srcp = ntohs(tw->tw_sport);
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
@@ -2055,9 +2063,9 @@ static void get_timewait6_sock(struct seq_file *seq,
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- tw->substate, 0, 0,
+ tw->tw_substate, 0, 0,
3, ttd, 0, 0, 0, 0,
- atomic_read(&tw->refcnt), tw);
+ atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
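The non-mechanical hunk in this file is tcp_v6_check_established(): beyond the tw_ prefixes on struct tcp_tw_bucket, it restyles the TIME_WAIT recycling test. Sketched with the new names (per the tcp_ipv4.c comment it cites, the offset starts the new connection's sequence space safely beyond the old one, so the peer can tell the two connections apart):

/* Sketch: reuse a TIME_WAIT 4-tuple when timestamps were in use. */
if (tw->tw_ts_recent_stamp) {
	tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
	if (!tp->write_seq)			/* 0 means "unset" */
		tp->write_seq = 1;
	tp->ts_recent       = tw->tw_ts_recent;	/* inherit PAWS state */
	tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
}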
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index baec5aa3f87e..bca19a128614 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -70,18 +70,18 @@ static __inline__ int udv6_rcv_saddr_equal(struct sock *sk, struct sock *sk2)
if (!inet_sk(sk2)->rcv_saddr && !ipv6_only_sock(sk))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
ipv6_addr_any(&inet6_sk(sk2)->rcv_saddr) &&
!(ipv6_only_sock(sk2) && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
(!ipv6_only_sock(sk) ||
- !(sk2->family == AF_INET6 ?
+ !(sk2->sk_family == AF_INET6 ?
(ipv6_addr_type(&inet6_sk(sk2)->rcv_saddr) == IPV6_ADDR_MAPPED) : 1)))
return 1;
- if (sk2->family == AF_INET6 &&
+ if (sk2->sk_family == AF_INET6 &&
!ipv6_addr_cmp(&inet6_sk(sk)->rcv_saddr,
&inet6_sk(sk2)->rcv_saddr))
return 1;
@@ -126,7 +126,7 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum)
do {
if (++size >= best_size_so_far)
goto next;
- } while ((sk2 = sk2->next) != NULL);
+ } while ((sk2 = sk2->sk_next) != NULL);
best_size_so_far = size;
best = result;
next:;
@@ -147,24 +147,24 @@ gotit:
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
- sk2 = sk2->next) {
+ sk2 = sk2->sk_next) {
if (inet_sk(sk2)->num == snum &&
sk2 != sk &&
- sk2->bound_dev_if == sk->bound_dev_if &&
- (!sk2->reuse || !sk->reuse) &&
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+ (!sk2->sk_reuse || !sk->sk_reuse) &&
udv6_rcv_saddr_equal(sk, sk2))
goto fail;
}
}
inet_sk(sk)->num = snum;
- if (sk->pprev == NULL) {
+ if (!sk->sk_pprev) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
- if ((sk->next = *skp) != NULL)
- (*skp)->pprev = &sk->next;
+ if ((sk->sk_next = *skp) != NULL)
+ (*skp)->sk_pprev = &sk->sk_next;
*skp = sk;
- sk->pprev = skp;
- sock_prot_inc_use(sk->prot);
+ sk->sk_pprev = skp;
+ sock_prot_inc_use(sk->sk_prot);
sock_hold(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -183,13 +183,13 @@ static void udp_v6_hash(struct sock *sk)
static void udp_v6_unhash(struct sock *sk)
{
write_lock_bh(&udp_hash_lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
inet_sk(sk)->num = 0;
- sock_prot_dec_use(sk->prot);
+ sock_prot_dec_use(sk->sk_prot);
__sock_put(sk);
}
write_unlock_bh(&udp_hash_lock);
@@ -203,10 +203,11 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
int badness = -1;
read_lock(&udp_hash_lock);
- for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
+ for (sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk;
+ sk = sk->sk_next) {
struct inet_opt *inet = inet_sk(sk);
- if (inet->num == hnum && sk->family == PF_INET6) {
+ if (inet->num == hnum && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
int score = 0;
if (inet->dport) {
@@ -224,8 +225,8 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
continue;
score++;
}
- if(sk->bound_dev_if) {
- if(sk->bound_dev_if != dif)
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
continue;
score++;
}
@@ -328,17 +329,19 @@ ipv4_connected:
if (addr_type&IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
- if (sk->bound_dev_if && sk->bound_dev_if != usin->sin6_scope_id) {
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != usin->sin6_scope_id) {
fl6_sock_release(flowlabel);
return -EINVAL;
}
- sk->bound_dev_if = usin->sin6_scope_id;
- if (!sk->bound_dev_if && (addr_type&IPV6_ADDR_MULTICAST))
+ sk->sk_bound_dev_if = usin->sin6_scope_id;
+ if (!sk->sk_bound_dev_if &&
+ (addr_type & IPV6_ADDR_MULTICAST))
fl.oif = np->mcast_oif;
}
/* Connect to link-local address requires an interface */
- if (sk->bound_dev_if == 0)
+ if (!sk->sk_bound_dev_if)
return -EINVAL;
}
@@ -355,7 +358,7 @@ ipv4_connected:
fl.proto = IPPROTO_UDP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet->dport;
fl.fl_ip_sport = inet->sport;
@@ -397,7 +400,7 @@ ipv4_connected:
!ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
&np->daddr : NULL);
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
}
fl6_sock_release(flowlabel);
@@ -494,12 +497,12 @@ csum_copy_err:
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
- spin_lock_irq(&sk->receive_queue.lock);
- if (skb == skb_peek(&sk->receive_queue)) {
- __skb_unlink(skb, &sk->receive_queue);
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ if (skb == skb_peek(&sk->sk_receive_queue)) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
clear = 1;
}
- spin_unlock_irq(&sk->receive_queue.lock);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
if (clear)
kfree_skb(skb);
}
@@ -534,14 +537,14 @@ static void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
goto out;
- if (sk->state != TCP_ESTABLISHED && !np->recverr)
+ if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
goto out;
if (np->recverr)
ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
out:
sock_put(sk);
}
@@ -553,7 +556,7 @@ static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
return -1;
}
- if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
@@ -578,10 +581,10 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
{
struct sock *s = sk;
unsigned short num = ntohs(loc_port);
- for(; s; s = s->next) {
+ for (; s; s = s->sk_next) {
struct inet_opt *inet = inet_sk(s);
- if (inet->num == num && sk->family == PF_INET6) {
+ if (inet->num == num && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(s);
if (inet->dport) {
if (inet->dport != rmt_port)
@@ -591,7 +594,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
ipv6_addr_cmp(&np->daddr, rmt_addr))
continue;
- if (s->bound_dev_if && s->bound_dev_if != dif)
+ if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&np->rcv_saddr)) {
@@ -627,8 +630,8 @@ static void udpv6_mcast_deliver(struct udphdr *uh,
buff = NULL;
sk2 = sk;
- while((sk2 = udp_v6_mcast_next(sk2->next, uh->dest, daddr,
- uh->source, saddr, dif))) {
+ while ((sk2 = udp_v6_mcast_next(sk2->sk_next, uh->dest, daddr,
+ uh->source, saddr, dif))) {
if (!buff) {
buff = skb_clone(skb, GFP_ATOMIC);
if (!buff)
@@ -770,7 +773,7 @@ static int udp_v6_push_pending_frames(struct sock *sk, struct udp_opt *up)
int err = 0;
/* Grab the skbuff where UDP header space exists. */
- if ((skb = skb_peek(&sk->write_queue)) == NULL)
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
/*
@@ -782,12 +785,12 @@ static int udp_v6_push_pending_frames(struct sock *sk, struct udp_opt *up)
uh->len = htons(up->len);
uh->check = 0;
- if (sk->no_check == UDP_CSUM_NOXMIT) {
+ if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
skb->ip_summed = CHECKSUM_NONE;
goto send;
}
- if (skb_queue_len(&sk->write_queue) == 1) {
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
skb->csum = csum_partial((char *)uh,
sizeof(struct udphdr), skb->csum);
uh->check = csum_ipv6_magic(&fl->fl6_src,
@@ -796,7 +799,7 @@ static int udp_v6_push_pending_frames(struct sock *sk, struct udp_opt *up)
} else {
u32 tmp_csum = 0;
- skb_queue_walk(&sk->write_queue, skb) {
+ skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
}
tmp_csum = csum_partial((char *)uh,
@@ -886,8 +889,11 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
}
}
- /* Otherwise it will be difficult to maintain sk->dst_cache. */
- if (sk->state == TCP_ESTABLISHED &&
+ /*
+ * Otherwise it will be difficult to maintain
+ * sk->sk_dst_cache.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
!ipv6_addr_cmp(daddr, &np->daddr))
daddr = &np->daddr;
@@ -896,7 +902,7 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
fl.oif = sin6->sin6_scope_id;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
up->dport = inet->dport;
@@ -923,7 +929,7 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
}
if (!fl.oif)
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
@@ -1144,12 +1150,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- sp->state,
- atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
+ sp->sk_state,
+ atomic_read(&sp->sk_wmem_alloc),
+ atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0,
sock_i_uid(sp), 0,
sock_i_ino(sp),
- atomic_read(&sp->refcnt), sp);
+ atomic_read(&sp->sk_refcnt), sp);
}
static int udp6_seq_show(struct seq_file *seq, void *v)
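udp_v6_push_pending_frames() above carries this file's other interesting rewrite: the checksum fold over sk_write_queue. Both cases sketched with the renamed queue (uh, up and fl as in the original; the fix-up for a computed checksum of zero is elided):

/* Sketch: UDPv6 checksum over one or many queued fragments. */
if (skb_queue_len(&sk->sk_write_queue) == 1) {
	/* Common case: one skb whose csum already covers the data. */
	skb->csum = csum_partial((char *)uh, sizeof(struct udphdr),
				 skb->csum);
	uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
				    up->len, IPPROTO_UDP, skb->csum);
} else {
	u32 tmp_csum = 0;

	/* Fold the partial checksums of every fragment, then add
	 * the UDP header and the IPv6 pseudo-header. */
	skb_queue_walk(&sk->sk_write_queue, skb)
		tmp_csum = csum_add(tmp_csum, skb->csum);
	tmp_csum = csum_partial((char *)uh, sizeof(struct udphdr),
				tmp_csum);
	uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
				    up->len, IPPROTO_UDP, tmp_csum);
}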
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 4590540030a0..c257bd3fab0f 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -143,16 +143,16 @@ static void ipx_remove_socket(struct sock *sk)
spin_lock_bh(&intrfc->if_sklist_lock);
s = intrfc->if_sklist;
if (s == sk) {
- intrfc->if_sklist = s->next;
+ intrfc->if_sklist = s->sk_next;
goto out_unlock;
}
- while (s && s->next) {
- if (s->next == sk) {
- s->next = sk->next;
+ while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
goto out_unlock;
}
- s = s->next;
+ s = s->sk_next;
}
out_unlock:
spin_unlock_bh(&intrfc->if_sklist_lock);
@@ -165,14 +165,14 @@ out:
static void ipx_destroy_socket(struct sock *sk)
{
ipx_remove_socket(sk);
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
#ifdef IPX_REFCNT_DEBUG
atomic_dec(&ipx_sock_nr);
printk(KERN_DEBUG "IPX socket %p released, %d are still alive\n", sk,
atomic_read(&ipx_sock_nr));
- if (atomic_read(&sk->refcnt) != 1)
+ if (atomic_read(&sk->sk_refcnt) != 1)
printk(KERN_DEBUG "Destruction sock ipx %p delayed, cnt=%d\n",
- sk, atomic_read(&sk->refcnt));
+ sk, atomic_read(&sk->sk_refcnt));
#endif
sock_put(sk);
}
@@ -246,14 +246,14 @@ static void ipxitf_insert_socket(struct ipx_interface *intrfc, struct sock *sk)
sock_hold(sk);
spin_lock_bh(&intrfc->if_sklist_lock);
ipx_sk(sk)->intrfc = intrfc;
- sk->next = NULL;
+ sk->sk_next = NULL;
if (!intrfc->if_sklist)
intrfc->if_sklist = sk;
else {
struct sock *s = intrfc->if_sklist;
- while (s->next)
- s = s->next;
- s->next = sk;
+ while (s->sk_next)
+ s = s->sk_next;
+ s->sk_next = sk;
}
spin_unlock_bh(&intrfc->if_sklist_lock);
ipxitf_put(intrfc);
@@ -266,7 +266,7 @@ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc,
struct sock *s = intrfc->if_sklist;
while (s && ipx_sk(s)->port != port)
- s = s->next;
+ s = s->sk_next;
return s;
}
@@ -303,7 +303,7 @@ static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc,
if (ipxs->port == port &&
!memcmp(node, ipxs->node, IPX_NODE_LEN))
break;
- s = s->next;
+ s = s->sk_next;
}
spin_unlock_bh(&intrfc->if_sklist_lock);
ipxitf_put(intrfc);
@@ -324,14 +324,14 @@ void __ipxitf_down(struct ipx_interface *intrfc)
for (s = intrfc->if_sklist; s;) {
struct ipx_opt *ipxs = ipx_sk(s);
- s->err = ENOLINK;
- s->error_report(s);
+ s->sk_err = ENOLINK;
+ s->sk_error_report(s);
ipxs->intrfc = NULL;
ipxs->port = 0;
- s->zapped = 1; /* Indicates it is no longer bound */
+ s->sk_zapped = 1; /* Indicates it is no longer bound */
t = s;
- s = s->next;
- t->next = NULL;
+ s = s->sk_next;
+ t->sk_next = NULL;
}
intrfc->if_sklist = NULL;
spin_unlock_bh(&intrfc->if_sklist_lock);
@@ -429,7 +429,7 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc,
if (intrfc != ipx_internal_net)
break;
}
- s = s->next;
+ s = s->sk_next;
}
/* skb was solely for us, and we did not make a copy, so free it. */
@@ -468,7 +468,7 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc,
spin_lock_bh(&intrfc->if_sklist_lock);
for (sk = intrfc->if_sklist;
sk && ipx_sk(sk)->ipx_ncp_conn != connection;
- sk = sk->next);
+ sk = sk->sk_next);
if (sk)
sock_hold(sk);
spin_unlock_bh(&intrfc->if_sklist_lock);
@@ -1385,7 +1385,7 @@ static int ipx_create(struct socket *sock, int protocol)
atomic_read(&ipx_sock_nr));
#endif
sock_init_data(sock, sk);
- sk->no_check = 1; /* Checksum off by default */
+ sk->sk_no_check = 1; /* Checksum off by default */
rc = 0;
out:
return rc;
@@ -1402,7 +1402,7 @@ static int ipx_release(struct socket *sock)
goto out;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock->sk = NULL;
@@ -1442,7 +1442,7 @@ static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_ipx *addr = (struct sockaddr_ipx *)uaddr;
int rc = -EINVAL;
- if (!sk->zapped || addr_len != sizeof(struct sockaddr_ipx))
+ if (!sk->sk_zapped || addr_len != sizeof(struct sockaddr_ipx))
goto out;
intrfc = ipxitf_find_using_net(addr->sipx_network);
@@ -1520,7 +1520,7 @@ static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
#endif /* CONFIG_IPX_INTERN */
ipxitf_insert_socket(intrfc, sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
rc = 0;
out_put:
@@ -1538,7 +1538,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
int rc = -EINVAL;
struct ipx_route *rt;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(*addr))
@@ -1580,7 +1580,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
if (sock->type == SOCK_DGRAM) {
sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
}
if (rt)
@@ -1604,7 +1604,7 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
if (peer) {
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
addr = &ipxs->dest_addr;
@@ -1703,7 +1703,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
int flags = msg->msg_flags;
/* Socket gets bound below anyway */
-/* if (sk->zapped)
+/* if (sk->sk_zapped)
return -EIO; */ /* Socket not bound */
if (flags & ~MSG_DONTWAIT)
goto out;
@@ -1733,7 +1733,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
goto out;
} else {
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
usipx = &local_sipx;
@@ -1784,7 +1784,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
}
rc = -ENOTCONN;
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
@@ -1803,7 +1803,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
copied);
if (rc)
goto out_free;
- sk->stamp = skb->stamp;
+ sk->sk_stamp = skb->stamp;
msg->msg_namelen = sizeof(*sipx);
@@ -1831,13 +1831,13 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case TIOCOUTQ:
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
rc = put_user(amount, (int *)arg);
break;
case TIOCINQ: {
- struct sk_buff *skb = skb_peek(&sk->receive_queue);
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
/* These two are safe on a single CPU system as only
* user tasks fiddle here */
if (skb)
@@ -1878,10 +1878,10 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
rc = -EINVAL;
if (sk) {
rc = -ENOENT;
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
break;
rc = -EFAULT;
- if (!copy_to_user((void *)arg, &sk->stamp,
+ if (!copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
rc = 0;
}
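Contrast ipx_remove_socket() at the top of this file with the raw.c hash earlier: if_sklist is singly linked (sk_next only, no sk_pprev), so unlinking must walk to the predecessor under the per-interface lock. The unlink, sketched:

/* Sketch: unlink sk from a singly linked if_sklist-style chain.
 * Caller holds the lock protecting the list. */
static void unlink_sock(struct sock **head, struct sock *sk)
{
	struct sock *s = *head;

	if (s == sk) {				/* sk is the head */
		*head = sk->sk_next;
		return;
	}
	while (s && s->sk_next) {		/* find predecessor */
		if (s->sk_next == sk) {
			s->sk_next = sk->sk_next;
			return;
		}
		s = s->sk_next;
	}
}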
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index fb7539aba467..5773bb395fb2 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -174,7 +174,7 @@ static __inline__ struct sock *ipx_get_socket_idx(loff_t pos)
if (!pos)
break;
spin_lock_bh(&i->if_sklist_lock);
- for (s = i->if_sklist; pos && s; s = s->next)
+ for (s = i->if_sklist; pos && s; s = s->sk_next)
--pos;
if (!pos) {
if (!s)
@@ -213,8 +213,8 @@ static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
sk = v;
- if (sk->next) {
- sk = sk->next;
+ if (sk->sk_next) {
+ sk = sk->sk_next;
goto out;
}
ipxs = ipx_sk(sk);
@@ -264,7 +264,7 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
seq_printf(seq, "%08lX:%04X ", (unsigned long) htonl(ipxs->intrfc->if_netnum),
htons(ipxs->port));
#endif /* CONFIG_IPX_INTERN */
- if (s->state != TCP_ESTABLISHED)
+ if (s->sk_state != TCP_ESTABLISHED)
seq_printf(seq, "%-28s", "Not_Connected");
else {
seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ",
@@ -276,8 +276,9 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
}
seq_printf(seq, "%08X %08X %02X %03d\n",
- atomic_read(&s->wmem_alloc), atomic_read(&s->rmem_alloc),
- s->state, SOCK_INODE(s->socket)->i_uid);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
out:
return 0;
}
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 48460f881e76..415554af8f3b 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -238,7 +238,7 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
}
/* Apply checksum. Not allowed on 802.3 links. */
- if (sk->no_check || intrfc->if_dlink_type == IPX_FRAME_8023)
+ if (sk->sk_no_check || intrfc->if_dlink_type == IPX_FRAME_8023)
ipx->ipx_checksum = 0xFFFF;
else
ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
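A one-line hunk, but the rule behind it is worth spelling out: an IPX checksum field of 0xFFFF means "checksum not used", and 802.3 framing cannot carry a checksum at all, so it is forced off there. The policy as renamed:

/* Sketch: IPX checksum policy in ipxrtr_route_packet(). */
if (sk->sk_no_check || intrfc->if_dlink_type == IPX_FRAME_8023)
	ipx->ipx_checksum = 0xFFFF;		/* checksum disabled */
else
	ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));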
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 5ea1be2f5ca1..808a8b53e497 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -133,12 +133,12 @@ static void irda_disconnect_indication(void *instance, void *sap,
}
/* Prevent race conditions with irda_release() and irda_shutdown() */
- if (!sock_flag(sk, SOCK_DEAD) && sk->state != TCP_CLOSE) {
- sk->state = TCP_CLOSE;
- sk->err = ECONNRESET;
- sk->shutdown |= SEND_SHUTDOWN;
+ if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = ECONNRESET;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state_change(sk);
/* Uh-oh... Should use sock_orphan ? */
sock_set_flag(sk, SOCK_DEAD);
@@ -151,7 +151,7 @@ static void irda_disconnect_indication(void *instance, void *sap,
* requests. Some apps forget to close sockets, or hang to it
* a bit too long, so we may stay in this dead state long
* enough to be noticed...
- * Note : all socket function do check sk->state, so we are
+ * Note : all socket function do check sk->sk_state, so we are
* safe...
* Jean II
*/
@@ -163,8 +163,8 @@ static void irda_disconnect_indication(void *instance, void *sap,
/* Note : once we are there, there is not much you want to do
* with the socket anymore, apart from closing it.
- * For example, bind() and connect() won't reset sk->err,
- * sk->shutdown and sk->flags to valid values...
+ * For example, bind() and connect() won't reset sk->sk_err,
+ * sk->sk_shutdown and sk->sk_flags to valid values...
* Jean II
*/
}
@@ -192,7 +192,7 @@ static void irda_connect_confirm(void *instance, void *sap,
return;
dev_kfree_skb(skb);
- // Should be ??? skb_queue_tail(&sk->receive_queue, skb);
+ // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb);
/* How much header space do we need to reserve */
self->max_header_size = max_header_size;
@@ -201,7 +201,7 @@ static void irda_connect_confirm(void *instance, void *sap,
self->max_sdu_size_tx = max_sdu_size;
/* Find out what the largest chunk of data that we can transmit is */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_STREAM:
if (max_sdu_size != 0) {
ERROR("%s: max_sdu_size must be 0\n", __FUNCTION__);
@@ -226,8 +226,8 @@ static void irda_connect_confirm(void *instance, void *sap,
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
/* We are now connected! */
- sk->state = TCP_ESTABLISHED;
- sk->state_change(sk);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_state_change(sk);
}
/*
@@ -258,7 +258,7 @@ static void irda_connect_indication(void *instance, void *sap,
self->max_sdu_size_tx = max_sdu_size;
/* Find out what the largest chunk of data that we can transmit is */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_STREAM:
if (max_sdu_size != 0) {
ERROR("%s: max_sdu_size must be 0\n", __FUNCTION__);
@@ -284,8 +284,8 @@ static void irda_connect_indication(void *instance, void *sap,
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
- skb_queue_tail(&sk->receive_queue, skb);
- sk->state_change(sk);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_state_change(sk);
}
/*
@@ -344,7 +344,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
self->tx_flow = flow;
IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
__FUNCTION__);
- wake_up_interruptible(sk->sleep);
+ wake_up_interruptible(sk->sk_sleep);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __FUNCTION__);
@@ -717,7 +717,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
struct irda_sock *self = irda_sk(sk);
if (peer) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
saddr.sir_family = AF_IRDA;
@@ -751,13 +751,13 @@ static int irda_listen(struct socket *sock, int backlog)
IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
- if ((sk->type != SOCK_STREAM) && (sk->type != SOCK_SEQPACKET) &&
- (sk->type != SOCK_DGRAM))
+ if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
+ (sk->sk_type != SOCK_DGRAM))
return -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN) {
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ if (sk->sk_state != TCP_LISTEN) {
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
return 0;
}
@@ -787,7 +787,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
#ifdef CONFIG_IRDA_ULTRA
/* Special care for Ultra sockets */
- if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA)) {
+ if ((sk->sk_type == SOCK_DGRAM) &&
+ (sk->sk_protocol == IRDAPROTO_ULTRA)) {
self->pid = addr->sir_lsap_sel;
if (self->pid & 0x80) {
IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __FUNCTION__);
@@ -802,7 +803,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Pretend we are connected */
sock->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
return 0;
}
@@ -839,7 +840,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
ASSERT(self != NULL, return -1;);
- err = irda_create(newsock, sk->protocol);
+ err = irda_create(newsock, sk->sk_protocol);
if (err)
return err;
@@ -849,11 +850,11 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
if ((sk = sock->sk) == NULL)
return -EINVAL;
- if ((sk->type != SOCK_STREAM) && (sk->type != SOCK_SEQPACKET) &&
- (sk->type != SOCK_DGRAM))
+ if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
+ (sk->sk_type != SOCK_DGRAM))
return -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
return -EINVAL;
/*
@@ -869,7 +870,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
* calling us, the data is waiting for us ;-)
* Jean II
*/
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
int ret = 0;
DECLARE_WAITQUEUE(waitq, current);
@@ -883,10 +884,10 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
* We don't use the macro because the condition has
* side effects: we want to make sure that only one
* skb gets dequeued - Jean II */
- add_wait_queue(sk->sleep, &waitq);
+ add_wait_queue(sk->sk_sleep, &waitq);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb != NULL)
break;
if (!signal_pending(current)) {
@@ -897,13 +898,13 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
break;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &waitq);
+ remove_wait_queue(sk->sk_sleep, &waitq);
if(ret)
return -ERESTARTSYS;
}
newsk = newsock->sk;
- newsk->state = TCP_ESTABLISHED;
+ newsk->sk_state = TCP_ESTABLISHED;
new = irda_sk(newsk);
ASSERT(new != NULL, return -1;);
@@ -935,7 +936,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
skb->sk = NULL;
skb->destructor = NULL;
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->state = SS_CONNECTED;
@@ -975,23 +976,23 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self);
/* Don't allow connect for Ultra sockets */
- if ((sk->type == SOCK_DGRAM) && (sk->protocol == IRDAPROTO_ULTRA))
+ if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
return -ESOCKTNOSUPPORT;
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
return 0; /* Connect completed during a ERESTARTSYS event */
}
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_irda))
@@ -1024,7 +1025,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
/* Connect to remote device */
err = irttp_connect_request(self->tsap, self->dtsap_sel,
@@ -1036,13 +1037,14 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
}
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return -EINPROGRESS;
- if (wait_event_interruptible(*(sk->sleep), (sk->state!=TCP_SYN_SENT)))
+ if (wait_event_interruptible(*(sk->sk_sleep),
+ (sk->sk_state != TCP_SYN_SENT)))
return -ERESTARTSYS;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
return sock_error(sk); /* Always set at this point */
}
@@ -1095,9 +1097,9 @@ static int irda_create(struct socket *sock, int protocol)
init_waitqueue_head(&self->query_wait);
/* Initialise networking socket struct */
- sock_init_data(sock, sk); /* Note : set sk->refcnt to 1 */
- sk->family = PF_IRDA;
- sk->protocol = protocol;
+ sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */
+ sk->sk_family = PF_IRDA;
+ sk->sk_protocol = protocol;
/* Link networking socket and IrDA socket structs together */
self->sk = sk;
@@ -1194,9 +1196,9 @@ static int irda_release(struct socket *sock)
if (sk == NULL)
return 0;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
/* Destroy IrDA socket */
irda_destroy_socket(irda_sk(sk));
@@ -1207,10 +1209,10 @@ static int irda_release(struct socket *sock)
sock->sk = NULL;
/* Purge queues (see sock_init_data()) */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
/* Destroy networking socket if we are the last reference on it,
- * i.e. if(sk->refcnt == 0) -> sk_free(sk) */
+ * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
sock_put(sk);
/* Notes on socket locking and deallocation... - Jean II
@@ -1264,12 +1266,12 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR))
return -EINVAL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
self = irda_sk(sk);
@@ -1277,12 +1279,12 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Check if IrTTP wants us to slow down */
- if (wait_event_interruptible(*(sk->sleep),
- (self->tx_flow != FLOW_STOP || sk->state != TCP_ESTABLISHED)))
+ if (wait_event_interruptible(*(sk->sk_sleep),
+ (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED)))
return -ERESTARTSYS;
/* Check if we are still connected */
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Check that we don't send out too big frames */
@@ -1358,7 +1360,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
* empty
*/
if (self->rx_flow == FLOW_STOP) {
- if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) {
+ if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
@@ -1398,9 +1400,8 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
do {
int chunk;
- struct sk_buff *skb;
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
- skb=skb_dequeue(&sk->receive_queue);
if (skb==NULL) {
int ret = 0;
@@ -1411,32 +1412,32 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
* wait_event_interruptible() macro.
* We don't use the macro because the test condition
* is messy. - Jean II */
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
- add_wait_queue(sk->sleep, &waitq);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ add_wait_queue(sk->sk_sleep, &waitq);
set_current_state(TASK_INTERRUPTIBLE);
/*
* POSIX 1003.1g mandates this order.
*/
- if (sk->err)
+ if (sk->sk_err)
ret = sock_error(sk);
- else if (sk->shutdown & RCV_SHUTDOWN)
+ else if (sk->sk_shutdown & RCV_SHUTDOWN)
;
else if (noblock)
ret = -EAGAIN;
else if (signal_pending(current))
ret = -ERESTARTSYS;
- else if (skb_peek(&sk->receive_queue) == NULL)
+ else if (skb_peek(&sk->sk_receive_queue) == NULL)
/* Wait until data arrives */
schedule();
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &waitq);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ remove_wait_queue(sk->sk_sleep, &waitq);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
if(ret)
return(ret);
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
continue;
@@ -1444,7 +1445,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
break;
@@ -1460,7 +1461,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
if (skb->len) {
IRDA_DEBUG(1, "%s(), back on q!\n",
__FUNCTION__);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
@@ -1469,7 +1470,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(0, "%s() questionable!?\n", __FUNCTION__);
/* put message back and return */
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
@@ -1481,7 +1482,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
* empty
*/
if (self->rx_flow == FLOW_STOP) {
- if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) {
+ if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
@@ -1512,12 +1513,12 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
if (msg->msg_flags & ~MSG_DONTWAIT)
return -EINVAL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
self = irda_sk(sk);
@@ -1578,7 +1579,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
if (msg->msg_flags & ~MSG_DONTWAIT)
return -EINVAL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
@@ -1629,9 +1630,9 @@ static int irda_shutdown(struct socket *sock, int how)
IRDA_DEBUG(1, "%s(%p)\n", __FUNCTION__, self);
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
if (self->iriap) {
iriap_close(self->iriap);
@@ -1664,32 +1665,32 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* Exceptional events? */
- if (sk->err)
+ if (sk->sk_err)
mask |= POLLERR;
- if (sk->shutdown & RCV_SHUTDOWN) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__);
mask |= POLLHUP;
}
/* Readable? */
- if (!skb_queue_empty(&sk->receive_queue)) {
+ if (!skb_queue_empty(&sk->sk_receive_queue)) {
IRDA_DEBUG(4, "Socket is readable\n");
mask |= POLLIN | POLLRDNORM;
}
/* Connection-based sockets need to check for termination and startup */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_STREAM:
- if (sk->state == TCP_CLOSE) {
+ if (sk->sk_state == TCP_CLOSE) {
IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__);
mask |= POLLHUP;
}
- if (sk->state == TCP_ESTABLISHED) {
+ if (sk->sk_state == TCP_ESTABLISHED) {
if ((self->tx_flow == FLOW_START) &&
sock_writeable(sk))
{
@@ -1726,7 +1727,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
if (put_user(amount, (unsigned int *)arg))
@@ -1738,7 +1739,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
if (put_user(amount, (unsigned int *)arg))
return -EFAULT;
@@ -1747,9 +1748,9 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- if (copy_to_user((void *)arg, &sk->stamp,
+ if (copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
return -EFAULT;
return 0;
@@ -1973,7 +1974,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
/* Only possible for a seqpacket service (TTP with SAR) */
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n",
__FUNCTION__, opt);
self->max_sdu_size_rx = opt;
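
The accept and stream-receive paths in the irda hunks above open-code their sleep instead of using wait_event_interruptible(), so the condition is re-tested on every wakeup and exactly one skb is dequeued per pass. Below is a minimal userspace sketch of the same dequeue-or-wait discipline, with a pthread condition variable standing in for the kernel wait queue; every name in it is invented for illustration and nothing here is kernel API.

#include <pthread.h>
#include <stddef.h>

struct item { struct item *next; };

struct waitq {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	struct item    *head;
};

/* Dequeue one item, sleeping until one is available. The condition is
 * re-tested after every wakeup, so at most one waiter claims each item. */
static struct item *dequeue_or_wait(struct waitq *q)
{
	struct item *it;

	pthread_mutex_lock(&q->lock);
	while ((it = q->head) == NULL)
		pthread_cond_wait(&q->cond, &q->lock);
	q->head = it->next;            /* exactly one item leaves the queue */
	pthread_mutex_unlock(&q->lock);
	return it;
}

The while loop around the wait is the essential part: a wakeup only means "look again", never "the item is yours". The kernel version additionally breaks out on signal_pending(); the loop shape is the same.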
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 988fd18da5b9..f86e3699cce4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -46,19 +46,19 @@ struct pfkey_opt {
int registered;
int promisc;
};
-#define pfkey_sk(__sk) ((struct pfkey_opt *)(__sk)->protinfo)
+#define pfkey_sk(__sk) ((struct pfkey_opt *)(__sk)->sk_protinfo)
static void pfkey_sock_destruct(struct sock *sk)
{
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive pfkey socket: %p\n", sk);
return;
}
- BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
- BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
kfree(pfkey_sk(sk));
@@ -114,7 +114,7 @@ static struct proto_ops pfkey_ops;
static void pfkey_insert(struct sock *sk)
{
pfkey_table_grab();
- sk->next = pfkey_table;
+ sk->sk_next = pfkey_table;
pfkey_table = sk;
sock_hold(sk);
pfkey_table_ungrab();
@@ -125,9 +125,9 @@ static void pfkey_remove(struct sock *sk)
struct sock **skp;
pfkey_table_grab();
- for (skp = &pfkey_table; *skp; skp = &((*skp)->next)) {
+ for (skp = &pfkey_table; *skp; skp = &((*skp)->sk_next)) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -165,8 +165,8 @@ static int pfkey_create(struct socket *sock, int protocol)
}
memset(pfk, 0, sizeof(*pfk));
- sk->family = PF_KEY;
- sk->destruct = pfkey_sock_destruct;
+ sk->sk_family = PF_KEY;
+ sk->sk_destruct = pfkey_sock_destruct;
atomic_inc(&pfkey_socks_nr);
@@ -188,7 +188,7 @@ static int pfkey_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
sock_put(sk);
return 0;
@@ -209,11 +209,11 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
}
}
if (*skb2 != NULL) {
- if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
skb_orphan(*skb2);
skb_set_owner_r(*skb2, sk);
- skb_queue_tail(&sk->receive_queue, *skb2);
- sk->data_ready(sk, (*skb2)->len);
+ skb_queue_tail(&sk->sk_receive_queue, *skb2);
+ sk->sk_data_ready(sk, (*skb2)->len);
*skb2 = NULL;
err = 0;
}
@@ -241,7 +241,7 @@ static int pfkey_broadcast(struct sk_buff *skb, int allocation,
return -ENOMEM;
pfkey_lock_table();
- for (sk = pfkey_table; sk; sk = sk->next) {
+ for (sk = pfkey_table; sk; sk = sk->sk_next) {
struct pfkey_opt *pfk = pfkey_sk(sk);
int err2;
@@ -2694,7 +2694,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
goto out;
err = -EMSGSIZE;
- if ((unsigned)len > sk->sndbuf-32)
+ if ((unsigned)len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
@@ -2804,12 +2804,12 @@ static int pfkey_read_proc(char *buffer, char **start, off_t offset,
read_lock(&pfkey_table_lock);
- for (s = pfkey_table; s; s = s->next) {
+ for (s = pfkey_table; s; s = s->sk_next) {
len += sprintf(buffer+len,"%p %-6d %-6u %-6u %-6u %-6lu",
s,
- atomic_read(&s->refcnt),
- atomic_read(&s->rmem_alloc),
- atomic_read(&s->wmem_alloc),
+ atomic_read(&s->sk_refcnt),
+ atomic_read(&s->sk_rmem_alloc),
+ atomic_read(&s->sk_wmem_alloc),
sock_i_uid(s),
sock_i_ino(s)
);
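
pfkey_sk() above is the whole private-state story for this family: the generic struct sock carries an opaque sk_protinfo pointer, and a one-line cast macro gives it a type. Here is a freestanding sketch of that accessor pattern with invented struct names; it models the idea only and is not the kernel's API.

#include <stdlib.h>

struct sock_base { void *protinfo; };                  /* generic part */
struct pfkey_state { int registered; int promisc; };   /* private part */

/* One-line typed accessor over the opaque pointer, as in pfkey_sk(). */
#define pfkey_state(sk) ((struct pfkey_state *)(sk)->protinfo)

static struct sock_base *sock_alloc_with_state(void)
{
	struct sock_base *sk = calloc(1, sizeof(*sk));

	if (sk) {
		sk->protinfo = calloc(1, sizeof(struct pfkey_state));
		if (!sk->protinfo) {
			free(sk);
			sk = NULL;
		}
	}
	return sk;   /* destructor frees sk->protinfo before sk, as above */
}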
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index c50d55b4765b..abe603925e15 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -97,7 +97,7 @@ static __inline__ u8 llc_ui_header_len(struct sock *sk,
if (addr->sllc_test || addr->sllc_xid)
rc = LLC_PDU_LEN_U;
- else if (sk->type == SOCK_STREAM)
+ else if (sk->sk_type == SOCK_STREAM)
rc = LLC_PDU_LEN_I;
return rc;
}
@@ -129,11 +129,11 @@ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
{
- sk->type = sock->type;
- sk->sleep = &sock->wait;
- sk->socket = sock;
- sock->sk = sk;
- sock->ops = &llc_ui_ops;
+ sk->sk_type = sock->type;
+ sk->sk_sleep = &sock->wait;
+ sk->sk_socket = sock;
+ sock->sk = sk;
+ sock->ops = &llc_ui_ops;
}
/**
@@ -180,8 +180,8 @@ static int llc_ui_release(struct socket *sock)
dprintk("%s: closing local(%02X) remote(%02X)\n", __FUNCTION__,
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
- llc_ui_wait_for_disc(sk, sk->rcvtimeo);
- if (!sk->zapped)
+ llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+ if (!sk->sk_zapped)
llc_sap_unassign_sock(llc->sap, sk);
release_sock(sk);
if (llc->sap && !llc->sap->sk_list.list)
@@ -246,7 +246,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
struct net_device *dev = NULL;
int rc = -EINVAL;
- if (!sk->zapped)
+ if (!sk->sk_zapped)
goto out;
/* bind to a specific mac, optional. */
if (!llc_mac_null(addr->sllc_smac)) {
@@ -281,7 +281,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
memset(&laddr, 0, sizeof(laddr));
memset(&daddr, 0, sizeof(daddr));
if (!llc_mac_null(addr->sllc_mmac)) {
- if (sk->type != SOCK_DGRAM) {
+ if (sk->sk_type != SOCK_DGRAM) {
rc = -EOPNOTSUPP;
goto out;
}
@@ -304,7 +304,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
memcpy(&llc->addr, addr, sizeof(llc->addr));
/* assign new connection to its SAP */
llc_sap_assign_sock(sap, sk);
- rc = sk->zapped = 0;
+ rc = sk->sk_zapped = 0;
out:
return rc;
}
@@ -334,7 +334,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
int rc = -EINVAL;
dprintk("%s: binding %02X\n", __FUNCTION__, addr->sllc_ssap);
- if (!sk->zapped || addrlen != sizeof(*addr))
+ if (!sk->sk_zapped || addrlen != sizeof(*addr))
goto out;
rc = -EAFNOSUPPORT;
if (addr->sllc_family != AF_LLC)
@@ -362,16 +362,16 @@ static int llc_ui_shutdown(struct socket *sock, int how)
int rc = -ENOTCONN;
lock_sock(sk);
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
rc = -EINVAL;
if (how != 2)
goto out;
rc = llc_send_disc(sk);
if (!rc)
- rc = llc_ui_wait_for_disc(sk, sk->rcvtimeo);
+ rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
/* Wake up anyone sleeping in poll */
- sk->state_change(sk);
+ sk->sk_state_change(sk);
out:
release_sock(sk);
return rc;
@@ -407,7 +407,7 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
if (addr->sllc_family != AF_LLC)
goto out;
/* bind connection to sap if user hasn't done it. */
- if (sk->zapped) {
+ if (sk->sk_zapped) {
/* bind to sap with null dev, exclusive */
rc = llc_ui_autobind(sock, addr);
if (rc)
@@ -422,23 +422,23 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
llc->dev = dev;
} else
dev = llc->dev;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
goto out;
rc = -EALREADY;
if (sock->state == SS_CONNECTING)
goto out;
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
llc->link = llc_ui_next_link_no(llc->sap->laddr.lsap);
rc = llc_establish_connection(sk, dev->dev_addr,
addr->sllc_dmac, addr->sllc_dsap);
if (rc) {
dprintk("%s: llc_ui_send_conn failed :-(\n", __FUNCTION__);
- sock->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
goto out;
}
- rc = llc_ui_wait_for_conn(sk, sk->rcvtimeo);
+ rc = llc_ui_wait_for_conn(sk, sk->sk_rcvtimeo);
if (rc)
dprintk("%s: llc_ui_wait_for_conn failed=%d\n", __FUNCTION__, rc);
out:
@@ -463,20 +463,20 @@ static int llc_ui_listen(struct socket *sock, int backlog)
if (sock->state != SS_UNCONNECTED)
goto out;
rc = -EOPNOTSUPP;
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
goto out;
rc = -EAGAIN;
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
rc = 0;
if (!(unsigned)backlog) /* BSDism */
backlog = 1;
- sk->max_ack_backlog = backlog;
- if (sk->state != TCP_LISTEN) {
- sk->ack_backlog = 0;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ if (sk->sk_state != TCP_LISTEN) {
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = TCP_LISTEN;
}
- sk->socket->flags |= __SO_ACCEPTCON;
+ sk->sk_socket->flags |= __SO_ACCEPTCON;
out:
release_sock(sk);
return rc;
@@ -487,7 +487,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, int timeout)
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ERESTARTSYS;
@@ -497,7 +497,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, int timeout)
if (!timeout)
break;
rc = 0;
- if (sk->state != TCP_CLOSE) {
+ if (sk->sk_state != TCP_CLOSE) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -505,7 +505,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -514,11 +514,11 @@ static int llc_ui_wait_for_conn(struct sock *sk, int timeout)
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -EAGAIN;
- if (sk->state == TCP_CLOSE)
+ if (sk->sk_state == TCP_CLOSE)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -527,7 +527,7 @@ static int llc_ui_wait_for_conn(struct sock *sk, int timeout)
if (!timeout)
break;
rc = 0;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -535,7 +535,7 @@ static int llc_ui_wait_for_conn(struct sock *sk, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -544,10 +544,10 @@ static int llc_ui_wait_for_data(struct sock *sk, int timeout)
DECLARE_WAITQUEUE(wait, current);
int rc = 0;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -558,12 +558,12 @@ static int llc_ui_wait_for_data(struct sock *sk, int timeout)
/*
* Well, if we have backlog, try to process it now.
*/
- if (sk->backlog.tail) {
+ if (sk->sk_backlog.tail) {
release_sock(sk);
lock_sock(sk);
}
rc = 0;
- if (skb_queue_empty(&sk->receive_queue)) {
+ if (skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -571,7 +571,7 @@ static int llc_ui_wait_for_data(struct sock *sk, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -581,12 +581,12 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout)
struct llc_opt *llc = llc_sk(sk);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
dprintk("%s: looping...\n", __FUNCTION__);
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ENOTCONN;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -603,7 +603,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -626,18 +626,18 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
dprintk("%s: accepting on %02X\n", __FUNCTION__,
llc_sk(sk)->addr.sllc_ssap);
lock_sock(sk);
- if (sk->type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM)
goto out;
rc = -EINVAL;
- if (sock->state != SS_UNCONNECTED || sk->state != TCP_LISTEN)
+ if (sock->state != SS_UNCONNECTED || sk->sk_state != TCP_LISTEN)
goto out;
/* wait for a connection to arrive. */
- rc = llc_ui_wait_for_data(sk, sk->rcvtimeo);
+ rc = llc_ui_wait_for_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out;
dprintk("%s: got a new connection on %02X\n", __FUNCTION__,
llc_sk(sk)->addr.sllc_ssap);
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
rc = -EINVAL;
if (!skb->sk)
goto frees;
@@ -645,9 +645,9 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
newsk = skb->sk;
/* attach connection to a new socket. */
llc_ui_sk_init(newsock, newsk);
- newsk->pair = NULL;
- newsk->zapped = 0;
- newsk->state = TCP_ESTABLISHED;
+ newsk->sk_pair = NULL;
+ newsk->sk_zapped = 0;
+ newsk->sk_state = TCP_ESTABLISHED;
newsock->state = SS_CONNECTED;
llc = llc_sk(sk);
newllc = llc_sk(newsk);
@@ -657,8 +657,8 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
newllc->link = llc_ui_next_link_no(newllc->laddr.lsap);
/* put original socket back into a clean listen state. */
- sk->state = TCP_LISTEN;
- sk->ack_backlog--;
+ sk->sk_state = TCP_LISTEN;
+ sk->sk_ack_backlog--;
skb->sk = NULL;
dprintk("%s: ok success on %02X, client on %02X\n", __FUNCTION__,
llc_sk(sk)->addr.sllc_ssap, newllc->addr.sllc_dsap);
@@ -699,7 +699,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
goto out;
}
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (!skb) /* shutdown */
goto out;
copied = skb->len;
@@ -710,7 +710,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
goto dgram_free;
if (skb->len > copied) {
skb_pull(skb, copied);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
}
if (uaddr)
memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
@@ -757,7 +757,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
addr = &llc->addr;
}
/* must bind connection to sap if user hasn't done it. */
- if (sk->zapped) {
+ if (sk->sk_zapped) {
/* bind to sap with null dev, exclusive. */
rc = llc_ui_autobind(sock, addr);
if (rc)
@@ -789,7 +789,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied);
if (rc)
goto out;
- if (sk->type == SOCK_DGRAM || addr->sllc_ua) {
+ if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_dmac,
addr->sllc_dsap);
goto out;
@@ -805,7 +805,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
rc = -ENOPROTOOPT;
- if (!(sk->type == SOCK_STREAM && !addr->sllc_ua))
+ if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
goto out;
rc = llc_ui_send_data(sk, skb, noblock);
if (rc)
@@ -839,13 +839,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
int rc = 0;
lock_sock(sk);
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
*uaddrlen = sizeof(sllc);
memset(uaddr, 0, *uaddrlen);
if (peer) {
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if(llc->dev)
sllc.sllc_arphrd = llc->dev->type;
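
The llc_ui_wait_for_*() helpers above all share one shape: mark the task interruptible, test the condition, sleep via schedule_timeout(), and loop with whatever timeout remains. A rough userspace analogue using pthread_cond_timedwait() follows; it is deadline-based rather than remaining-ticks and purely illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Wait until *flag is true or the absolute 'deadline' passes; returns
 * the final state of the flag. Re-tests after every wakeup, like the
 * llc loops, but tracks a deadline instead of remaining ticks. */
static bool wait_flag_until(pthread_mutex_t *lock, pthread_cond_t *cond,
			    const bool *flag, const struct timespec *deadline)
{
	bool ok;

	pthread_mutex_lock(lock);
	while (!*flag &&
	       pthread_cond_timedwait(cond, lock, deadline) != ETIMEDOUT)
		;
	ok = *flag;
	pthread_mutex_unlock(lock);
	return ok;
}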
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index fb559316cf4a..08ee90b3fed3 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -783,7 +783,7 @@ void llc_conn_set_p_flag(struct sock *sk, u8 value)
llc_sk(sk)->p_flag = value;
if (state_changed)
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 3fbc40dae6ce..fb34073de1a5 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -44,7 +44,7 @@ void llc_save_primitive(struct sk_buff* skb, u8 prim)
struct sockaddr_llc *addr = llc_ui_skb_cb(skb);
/* save primitive for use by the user. */
- addr->sllc_family = skb->sk->family;
+ addr->sllc_family = skb->sk->sk_family;
addr->sllc_arphrd = skb->dev->type;
addr->sllc_test = prim == LLC_TEST_PRIM;
addr->sllc_xid = prim == LLC_XID_PRIM;
@@ -110,18 +110,19 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
struct sock *parent = skb->sk;
skb->sk = sk;
- skb_queue_tail(&parent->receive_queue, skb);
- sk->state_change(parent);
+ skb_queue_tail(&parent->sk_receive_queue, skb);
+ sk->sk_state_change(parent);
}
break;
case LLC_DISC_PRIM:
sock_hold(sk);
- if (sk->type == SOCK_STREAM && sk->state == TCP_ESTABLISHED) {
- sk->shutdown = SHUTDOWN_MASK;
- sk->socket->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
+ if (sk->sk_type == SOCK_STREAM &&
+ sk->sk_state == TCP_ESTABLISHED) {
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ sk->sk_socket->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
@@ -149,28 +150,29 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
switch (ev->cfm_prim) {
case LLC_DATA_PRIM:
if (!llc_data_accept_state(llc->state))
- sk->write_space(sk);
+ sk->sk_write_space(sk);
else
rc = llc->failed_data_req = 1;
break;
case LLC_CONN_PRIM:
- if (sk->type == SOCK_STREAM && sk->state == TCP_SYN_SENT) {
+ if (sk->sk_type == SOCK_STREAM &&
+ sk->sk_state == TCP_SYN_SENT) {
if (ev->status) {
- sk->socket->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
+ sk->sk_socket->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
} else {
- sk->socket->state = SS_CONNECTED;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_socket->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
}
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
break;
case LLC_DISC_PRIM:
sock_hold(sk);
- if (sk->type == SOCK_STREAM && sk->state == TCP_CLOSING) {
- sk->socket->state = SS_UNCONNECTED;
- sk->state = TCP_CLOSE;
- sk->state_change(sk);
+ if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSING) {
+ sk->sk_socket->state = SS_UNCONNECTED;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_state_change(sk);
}
sock_put(sk);
break;
@@ -199,7 +201,7 @@ out_skb_put:
void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
llc_conn_send_pdus(sk);
}
@@ -250,7 +252,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
pdu = llc_pdu_sn_hdr(skb);
llc_pdu_set_cmd_rsp(skb, LLC_PDU_CMD);
llc_pdu_set_pf_bit(skb, first_p_bit);
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
first_p_bit = 0;
llc->vS = LLC_I_GET_NS(pdu);
howmany_resend++;
@@ -291,7 +293,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
llc_pdu_set_cmd_rsp(skb, LLC_PDU_RSP);
llc_pdu_set_pf_bit(skb, first_f_bit);
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
first_f_bit = 0;
llc->vS = LLC_I_GET_NS(pdu);
howmany_resend++;
@@ -351,7 +353,7 @@ static void llc_conn_send_pdus(struct sock *sk)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&sk->write_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
if (!LLC_PDU_TYPE_IS_I(pdu) &&
@@ -391,7 +393,7 @@ static int llc_conn_service(struct sock *sk, struct sk_buff *skb)
if (!rc && trans->next_state != NO_STATE_CHANGE) {
llc->state = trans->next_state;
if (!llc_data_accept_state(llc->state))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
}
out:
@@ -489,7 +491,7 @@ struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
struct sock *rc;
read_lock_bh(&sap->sk_list.lock);
- for (rc = sap->sk_list.list; rc; rc = rc->next) {
+ for (rc = sap->sk_list.list; rc; rc = rc->sk_next) {
struct llc_opt *llc = llc_sk(rc);
if (llc->laddr.lsap == laddr->lsap &&
@@ -518,10 +520,10 @@ struct sock *llc_lookup_listener(struct llc_sap *sap, struct llc_addr *laddr)
struct sock *rc;
read_lock_bh(&sap->sk_list.lock);
- for (rc = sap->sk_list.list; rc; rc = rc->next) {
+ for (rc = sap->sk_list.list; rc; rc = rc->sk_next) {
struct llc_opt *llc = llc_sk(rc);
- if (rc->type == SOCK_STREAM && rc->state == TCP_LISTEN &&
+ if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
llc->laddr.lsap == laddr->lsap &&
llc_mac_match(llc->laddr.mac, laddr->mac))
break;
@@ -545,10 +547,10 @@ struct sock *llc_lookup_dgram(struct llc_sap *sap, struct llc_addr *laddr)
struct sock *rc;
read_lock_bh(&sap->sk_list.lock);
- for (rc = sap->sk_list.list; rc; rc = rc->next) {
+ for (rc = sap->sk_list.list; rc; rc = rc->sk_next) {
struct llc_opt *llc = llc_sk(rc);
- if (rc->type == SOCK_DGRAM &&
+ if (rc->sk_type == SOCK_DGRAM &&
llc->laddr.lsap == laddr->lsap &&
llc_mac_match(llc->laddr.mac, laddr->mac))
break;
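
Throughout llc_conn_state_process(), a store to sk_state is paired with a call to the sk_state_change() hook so that anything sleeping in poll() or connect() re-evaluates. A compact sketch of that notify-after-transition idiom follows; the types and callback names are invented.

#include <stdio.h>

enum conn_state { ST_CLOSED, ST_CONNECTING, ST_ESTABLISHED };

struct conn {
	enum conn_state state;
	void (*state_change)(struct conn *);   /* wakes any waiters */
};

/* Store the new state, then fire the notification hook. */
static void conn_set_state(struct conn *c, enum conn_state next)
{
	if (c->state == next)
		return;               /* avoid spurious notifications */
	c->state = next;
	c->state_change(c);           /* waiters re-check c->state */
}

static void log_change(struct conn *c)
{
	printf("state is now %d\n", (int)c->state);
}

With struct conn c = { ST_CLOSED, log_change };, conn_set_state(&c, ST_ESTABLISHED) notifies once and repeated calls with the same state are no-ops. Calling the hook strictly after the store matters: waiters woken by it must observe the new state.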
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 7736ea2920e1..217765b6fb44 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -223,7 +223,7 @@ int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap)
memcpy(laddr.mac, lmac, sizeof(laddr.mac));
existing = llc_lookup_established(llc->sap, &daddr, &laddr);
if (existing) {
- if (existing->state == TCP_ESTABLISHED) {
+ if (existing->sk_state == TCP_ESTABLISHED) {
sk = existing;
goto out_put;
} else
@@ -261,7 +261,7 @@ int llc_send_disc(struct sock *sk)
struct sk_buff *skb;
sock_hold(sk);
- if (sk->type != SOCK_STREAM || sk->state != TCP_ESTABLISHED ||
+ if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_ESTABLISHED ||
llc_sk(sk)->state == LLC_CONN_STATE_ADM ||
llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC)
goto out;
@@ -272,7 +272,7 @@ int llc_send_disc(struct sock *sk)
skb = alloc_skb(0, GFP_ATOMIC);
if (!skb)
goto out;
- sk->state = TCP_CLOSING;
+ sk->sk_state = TCP_CLOSING;
ev = llc_conn_ev(skb);
ev->type = LLC_CONN_EV_TYPE_PRIM;
ev->prim = LLC_DISC_PRIM;
diff --git a/net/llc/llc_mac.c b/net/llc/llc_mac.c
index b78696665109..9e4d18eb93c9 100644
--- a/net/llc/llc_mac.c
+++ b/net/llc/llc_mac.c
@@ -126,7 +126,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
- sk = llc_sk_alloc(parent->family, GFP_ATOMIC);
+ sk = llc_sk_alloc(parent->sk_family, GFP_ATOMIC);
if (!sk) {
sock_put(parent);
goto drop;
diff --git a/net/llc/llc_main.c b/net/llc/llc_main.c
index 5d8d1602bb9e..a24c089c4ca7 100644
--- a/net/llc/llc_main.c
+++ b/net/llc/llc_main.c
@@ -206,7 +206,7 @@ int llc_sk_init(struct sock* sk)
llc->rw = 128; /* rx win size (opt and equal to
* tx_win of remote LLC) */
skb_queue_head_init(&llc->pdu_unack_q);
- sk->backlog_rcv = llc_backlog_rcv;
+ sk->sk_backlog_rcv = llc_backlog_rcv;
llc_sk(sk) = llc;
out:
return rc;
@@ -258,15 +258,15 @@ void llc_sk_free(struct sock *sk)
#ifdef DEBUG_LLC_CONN_ALLOC
printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __FUNCTION__,
skb_queue_len(&llc->pdu_unack_q),
- skb_queue_len(&sk->write_queue));
+ skb_queue_len(&sk->sk_write_queue));
#endif
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&llc->pdu_unack_q);
#ifdef LLC_REFCNT_DEBUG
- if (atomic_read(&sk->refcnt) != 1) {
+ if (atomic_read(&sk->sk_refcnt) != 1) {
printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
- sk, __FUNCTION__, atomic_read(&sk->refcnt));
+ sk, __FUNCTION__, atomic_read(&sk->sk_refcnt));
printk(KERN_DEBUG "%d LLC sockets are still alive\n",
atomic_read(&llc_sock_nr));
} else {
@@ -290,7 +290,7 @@ void llc_sk_reset(struct sock *sk)
struct llc_opt *llc = llc_sk(sk);
llc_conn_ac_stop_all_timers(sk, NULL);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&llc->pdu_unack_q);
llc->remote_busy_flag = 0;
llc->cause_flag = 0;
@@ -323,7 +323,7 @@ static int llc_rtn_all_conns(struct llc_sap *sap)
write_lock_bh(&sap->sk_list.lock);
- for (sk = sap->sk_list.list; sk; sk = sk->next) {
+ for (sk = sap->sk_list.list; sk; sk = sk->sk_next) {
llc_sk(sk)->state = LLC_CONN_STATE_TEMP;
if (llc_send_disc(sk))
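
llc_sk_free() above complains when a socket is torn down while sk_refcnt is still above one; the underlying discipline is that every get is paired with a put and the final put frees. A minimal non-atomic sketch of that discipline (the kernel uses atomic_t and sock_hold()/sock_put(); a plain int stands in here):

#include <stdlib.h>

struct ref_obj {
	int refcnt;   /* kernel uses atomic_t; plain int for the sketch */
};

static struct ref_obj *obj_new(void)
{
	struct ref_obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcnt = 1;        /* creator holds the first reference */
	return o;
}

static struct ref_obj *obj_get(struct ref_obj *o)
{
	o->refcnt++;                  /* like sock_hold() */
	return o;
}

static void obj_put(struct ref_obj *o)
{
	if (--o->refcnt == 0)         /* like sock_put(): last put frees */
		free(o);
}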
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index c27759125555..b7b4cf9f3381 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -44,7 +44,7 @@ static struct sock *llc_get_sk_idx(loff_t pos)
sap = list_entry(sap_entry, struct llc_sap, node);
read_lock_bh(&sap->sk_list.lock);
- for (sk = sap->sk_list.list; sk; sk = sk->next)
+ for (sk = sap->sk_list.list; sk; sk = sk->sk_next)
if (!pos--) {
if (!sk)
read_unlock_bh(&sap->sk_list.lock);
@@ -76,8 +76,8 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
sk = v;
- if (sk->next) {
- sk = sk->next;
+ if (sk->sk_next) {
+ sk = sk->sk_next;
goto out;
}
llc = llc_sk(sk);
@@ -124,7 +124,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
sk = v;
llc = llc_sk(sk);
- seq_printf(seq, "%2X %2X ", sk->type,
+ seq_printf(seq, "%2X %2X ", sk->sk_type,
!llc_mac_null(llc->addr.sllc_mmac));
if (llc->dev && llc_mac_null(llc->addr.sllc_mmac))
@@ -136,8 +136,10 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
seq_printf(seq, "@%02X ", llc->sap->laddr.lsap);
llc_ui_format_mac(seq, llc->addr.sllc_dmac);
seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->addr.sllc_dsap,
- atomic_read(&sk->wmem_alloc), atomic_read(&sk->rmem_alloc),
- sk->state, sk->socket ? SOCK_INODE(sk->socket)->i_uid : -1,
+ atomic_read(&sk->sk_wmem_alloc),
+ atomic_read(&sk->sk_rmem_alloc),
+ sk->sk_state,
+ sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1,
llc->link);
out:
return 0;
@@ -181,7 +183,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v)
timer_pending(&llc->pf_cycle_timer.timer),
timer_pending(&llc->rej_sent_timer.timer),
timer_pending(&llc->busy_state_timer.timer),
- !!sk->backlog.tail, sock_owned_by_user(sk));
+ !!sk->sk_backlog.tail, sock_owned_by_user(sk));
out:
return 0;
}
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index bcbab8bfaf8b..bd96be08f9ab 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -34,11 +34,11 @@ void llc_sap_assign_sock(struct llc_sap *sap, struct sock *sk)
{
write_lock_bh(&sap->sk_list.lock);
llc_sk(sk)->sap = sap;
- sk->next = sap->sk_list.list;
- if (sk->next)
- sap->sk_list.list->pprev = &sk->next;
+ sk->sk_next = sap->sk_list.list;
+ if (sk->sk_next)
+ sap->sk_list.list->sk_pprev = &sk->sk_next;
sap->sk_list.list = sk;
- sk->pprev = &sap->sk_list.list;
+ sk->sk_pprev = &sap->sk_list.list;
sock_hold(sk);
write_unlock_bh(&sap->sk_list.lock);
}
@@ -53,14 +53,14 @@ void llc_sap_assign_sock(struct llc_sap *sap, struct sock *sk)
void llc_sap_unassign_sock(struct llc_sap *sap, struct sock *sk)
{
write_lock_bh(&sap->sk_list.lock);
- if (sk->pprev) {
- if (sk->next)
- sk->next->pprev = sk->pprev;
- *sk->pprev = sk->next;
- sk->pprev = NULL;
+ if (sk->sk_pprev) {
+ if (sk->sk_next)
+ sk->sk_next->sk_pprev = sk->sk_pprev;
+ *sk->sk_pprev = sk->sk_next;
+ sk->sk_pprev = NULL;
/*
* This only makes sense if the socket was inserted on the
- * list, if sk->pprev is NULL it wasn't
+ * list; if sk->sk_pprev is NULL it wasn't
*/
sock_put(sk);
}
@@ -195,7 +195,7 @@ void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
ev->ind_cfm_flag = 0;
llc_sap_next_state(sap, skb);
if (ev->ind_cfm_flag == LLC_IND) {
- if (skb->sk->state == TCP_LISTEN)
+ if (skb->sk->sk_state == TCP_LISTEN)
kfree_skb(skb);
else {
llc_save_primitive(skb, ev->prim);
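
llc_sap_assign_sock()/llc_sap_unassign_sock() above use the classic pprev trick: each node records the address of the pointer that points at it, so removal needs no list walk and works from the middle of the list. The same structure on a generic node type, as a sketch rather than the kernel's struct sock:

#include <stddef.h>

struct node {
	struct node  *next;
	struct node **pprev;   /* address of the pointer pointing at us */
};

static void list_add(struct node **head, struct node *n)
{
	n->next = *head;
	if (n->next)
		n->next->pprev = &n->next;
	*head = n;
	n->pprev = head;
}

static void list_del(struct node *n)
{
	if (!n->pprev)
		return;               /* never inserted, as the comment above notes */
	if (n->next)
		n->next->pprev = n->pprev;
	*n->pprev = n->next;
	n->pprev = NULL;
}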
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b16b56412e54..14127db6d2af 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -66,7 +66,7 @@ struct netlink_opt
void (*data_ready)(struct sock *sk, int bytes);
};
-#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->protinfo)
+#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)
static struct sock *nl_table[MAX_LINKS];
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
@@ -88,14 +88,14 @@ static struct notifier_block *netlink_chain;
static void netlink_sock_destruct(struct sock *sk)
{
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Freeing alive netlink socket %p\n", sk);
return;
}
- BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
- BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
BUG_TRAP(!nlk_sk(sk)->cb);
kfree(nlk_sk(sk));
@@ -162,7 +162,7 @@ static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
struct sock *sk;
read_lock(&nl_table_lock);
- for (sk=nl_table[protocol]; sk; sk=sk->next) {
+ for (sk = nl_table[protocol]; sk; sk = sk->sk_next) {
if (nlk_sk(sk)->pid == pid) {
sock_hold(sk);
read_unlock(&nl_table_lock);
@@ -182,7 +182,7 @@ static int netlink_insert(struct sock *sk, u32 pid)
struct sock *osk;
netlink_table_grab();
- for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
+ for (osk = nl_table[sk->sk_protocol]; osk; osk = osk->sk_next) {
if (nlk_sk(osk)->pid == pid)
break;
}
@@ -190,8 +190,8 @@ static int netlink_insert(struct sock *sk, u32 pid)
err = -EBUSY;
if (nlk_sk(sk)->pid == 0) {
nlk_sk(sk)->pid = pid;
- sk->next = nl_table[sk->protocol];
- nl_table[sk->protocol] = sk;
+ sk->sk_next = nl_table[sk->sk_protocol];
+ nl_table[sk->sk_protocol] = sk;
sock_hold(sk);
err = 0;
}
@@ -205,9 +205,9 @@ static void netlink_remove(struct sock *sk)
struct sock **skp;
netlink_table_grab();
- for (skp = &nl_table[sk->protocol]; *skp; skp = &((*skp)->next)) {
+ for (skp = &nl_table[sk->sk_protocol]; *skp; skp = &((*skp)->sk_next)) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -246,10 +246,10 @@ static int netlink_create(struct socket *sock, int protocol)
spin_lock_init(&nlk->cb_lock);
init_waitqueue_head(&nlk->wait);
- sk->destruct = netlink_sock_destruct;
+ sk->sk_destruct = netlink_sock_destruct;
atomic_inc(&netlink_sock_nr);
- sk->protocol=protocol;
+ sk->sk_protocol = protocol;
return 0;
}
@@ -280,11 +280,13 @@ static int netlink_release(struct socket *sock)
sock->sk = NULL;
wake_up_interruptible_all(&nlk->wait);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
if (nlk->pid && !nlk->groups) {
- struct netlink_notify n = { .protocol = sk->protocol,
- .pid = nlk->pid };
+ struct netlink_notify n = {
+ .protocol = sk->sk_protocol,
+ .pid = nlk->pid,
+ };
notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
}
@@ -301,7 +303,7 @@ static int netlink_autobind(struct socket *sock)
retry:
netlink_table_grab();
- for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
+ for (osk = nl_table[sk->sk_protocol]; osk; osk = osk->sk_next) {
if (nlk_sk(osk)->pid == pid) {
/* Bind collision, search negative pid values. */
if (pid > 0)
@@ -322,7 +324,8 @@ retry:
static inline int netlink_capable(struct socket *sock, unsigned flag)
{
- return (nl_nonroot[sock->sk->protocol] & flag) || capable(CAP_NET_ADMIN);
+ return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
+ capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
@@ -412,8 +415,8 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr
static void netlink_overrun(struct sock *sk)
{
if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
- sk->err = ENOBUFS;
- sk->error_report(sk);
+ sk->sk_err = ENOBUFS;
+ sk->sk_error_report(sk);
}
}
@@ -422,7 +425,7 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock
struct sock *sk;
struct netlink_opt *nlk;
int len = skb->len;
- int protocol = ssk->protocol;
+ int protocol = ssk->sk_protocol;
long timeo;
DECLARE_WAITQUEUE(wait, current);
@@ -443,7 +446,7 @@ retry:
}
#endif
- if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) {
if (!timeo) {
if (!nlk->pid)
@@ -456,7 +459,7 @@ retry:
__set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&nlk->wait, &wait);
- if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
+ if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
timeo = schedule_timeout(timeo);
@@ -474,8 +477,8 @@ retry:
skb_orphan(skb);
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, len);
sock_put(sk);
return len;
@@ -490,16 +493,16 @@ static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff
#ifdef NL_EMULATE_DEV
if (nlk->handler) {
skb_orphan(skb);
- nlk->handler(sk->protocol, skb);
+ nlk->handler(sk->sk_protocol, skb);
return 0;
} else
#endif
- if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf &&
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!test_bit(0, &nlk->state)) {
skb_orphan(skb);
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, skb->len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
return 0;
}
return -1;
@@ -510,14 +513,14 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
{
struct sock *sk;
struct sk_buff *skb2 = NULL;
- int protocol = ssk->protocol;
+ int protocol = ssk->sk_protocol;
int failure = 0, delivered = 0;
/* While we sleep in clone, do not allow the socket list to change */
netlink_lock_table();
- for (sk = nl_table[protocol]; sk; sk = sk->next) {
+ for (sk = nl_table[protocol]; sk; sk = sk->sk_next) {
struct netlink_opt *nlk = nlk_sk(sk);
if (ssk == sk)
@@ -569,10 +572,10 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
struct sock *sk;
- int protocol = ssk->protocol;
+ int protocol = ssk->sk_protocol;
read_lock(&nl_table_lock);
- for (sk = nl_table[protocol]; sk; sk = sk->next) {
+ for (sk = nl_table[protocol]; sk; sk = sk->sk_next) {
struct netlink_opt *nlk = nlk_sk(sk);
if (ssk == sk)
continue;
@@ -580,8 +583,8 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
if (nlk->pid == pid || !(nlk->groups & group))
continue;
- sk->err = code;
- sk->error_report(sk);
+ sk->sk_err = code;
+ sk->sk_error_report(sk);
}
read_unlock(&nl_table_lock);
}
@@ -590,7 +593,7 @@ static inline void netlink_rcv_wake(struct sock *sk)
{
struct netlink_opt *nlk = nlk_sk(sk);
- if (skb_queue_len(&sk->receive_queue) == 0)
+ if (!skb_queue_len(&sk->sk_receive_queue))
clear_bit(0, &nlk->state);
if (!test_bit(0, &nlk->state))
wake_up_interruptible(&nlk->wait);
@@ -637,7 +640,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
}
err = -EMSGSIZE;
- if ((unsigned)len > sk->sndbuf-32)
+ if ((unsigned)len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
skb = alloc_skb(len, GFP_KERNEL);
@@ -726,7 +729,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
siocb->scm->creds = *NETLINK_CREDS(skb);
skb_free_datagram(sk, skb);
- if (nlk->cb && atomic_read(&sk->rmem_alloc) <= sk->rcvbuf / 2)
+ if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
netlink_dump(sk);
scm_recv(sock, msg, siocb->scm, flags);
@@ -770,7 +773,7 @@ netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
return NULL;
}
sk = sock->sk;
- sk->data_ready = netlink_data_ready;
+ sk->sk_data_ready = netlink_data_ready;
if (input)
nlk_sk(sk)->data_ready = input;
@@ -821,16 +824,16 @@ static int netlink_dump(struct sock *sk)
if (len > 0) {
spin_unlock(&nlk->cb_lock);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, len);
return 0;
}
nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
nlh->nlmsg_flags |= NLM_F_MULTI;
memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk, skb->len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
cb->done(cb);
nlk->cb = NULL;
@@ -861,7 +864,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
atomic_inc(&skb->users);
cb->skb = skb;
- sk = netlink_lookup(ssk->protocol, NETLINK_CB(skb).pid);
+ sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
if (sk == NULL) {
netlink_destroy_callback(cb);
return -ECONNREFUSED;
@@ -922,7 +925,7 @@ int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
return -ENOBUFS;
nlk_sk(sk)->handler = function;
write_lock_bh(&nl_emu_lock);
- netlink_kernel[unit] = sk->socket;
+ netlink_kernel[unit] = sk->sk_socket;
write_unlock_bh(&nl_emu_lock);
return 0;
}
@@ -978,18 +981,18 @@ static int netlink_read_proc(char *buffer, char **start, off_t offset,
for (i=0; i<MAX_LINKS; i++) {
read_lock(&nl_table_lock);
- for (s = nl_table[i]; s; s = s->next) {
+ for (s = nl_table[i]; s; s = s->sk_next) {
struct netlink_opt *nlk = nlk_sk(s);
len+=sprintf(buffer+len,"%p %-3d %-6d %08x %-8d %-8d %p %d",
s,
- s->protocol,
+ s->sk_protocol,
nlk->pid,
nlk->groups,
- atomic_read(&s->rmem_alloc),
- atomic_read(&s->wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ atomic_read(&s->sk_wmem_alloc),
nlk->cb,
- atomic_read(&s->refcnt)
+ atomic_read(&s->sk_refcnt)
);
buffer[len++]='\n';
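
netlink_unicast() and netlink_broadcast_deliver() above both admit an skb only while sk_rmem_alloc is within sk_rcvbuf; past that, the sender blocks or records an overrun. The same bounded-producer admission check in miniature, with plain integers standing in for the atomic counters (illustrative only):

#include <stdbool.h>
#include <stddef.h>

struct rx_queue {
	size_t rmem_alloc;   /* bytes currently queued */
	size_t rcvbuf;       /* admission limit */
};

/* Returns true if the message was accepted and charged to the queue. */
static bool rx_charge(struct rx_queue *q, size_t len)
{
	if (q->rmem_alloc > q->rcvbuf)
		return false;        /* overrun: caller reports ENOBUFS */
	q->rmem_alloc += len;    /* uncharged again when the reader frees it */
	return true;
}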
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index cb0731628cf0..77d6d50ef3ef 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -94,19 +94,19 @@ static void nr_remove_socket(struct sock *sk)
spin_lock_bh(&nr_list_lock);
if ((s = nr_list) == sk) {
- nr_list = s->next;
+ nr_list = s->sk_next;
spin_unlock_bh(&nr_list_lock);
return;
}
- while (s != NULL && s->next != NULL) {
- if (s->next == sk) {
- s->next = sk->next;
+ while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
spin_unlock_bh(&nr_list_lock);
return;
}
- s = s->next;
+ s = s->sk_next;
}
spin_unlock_bh(&nr_list_lock);
@@ -120,7 +120,7 @@ static void nr_kill_by_device(struct net_device *dev)
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
if (nr_sk(s)->device == dev)
nr_disconnect(s, ENETUNREACH);
}
@@ -149,7 +149,7 @@ static int nr_device_event(struct notifier_block *this, unsigned long event, voi
static void nr_insert_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
- sk->next = nr_list;
+ sk->sk_next = nr_list;
nr_list = sk;
spin_unlock_bh(&nr_list_lock);
}
@@ -163,9 +163,9 @@ static struct sock *nr_find_listener(ax25_address *addr)
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
- s->state == TCP_LISTEN) {
+ s->sk_state == TCP_LISTEN) {
spin_unlock_bh(&nr_list_lock);
return s;
}
@@ -183,7 +183,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
nr_cb *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
@@ -205,7 +205,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
struct sock *s;
spin_lock_bh(&nr_list_lock);
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
nr_cb *nr = nr_sk(s);
if (nr->your_index == index && nr->your_id == id &&
@@ -274,7 +274,7 @@ void nr_destroy_socket(struct sock *sk)
nr_clear_queues(sk); /* Flush the queues */
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
@@ -285,13 +285,14 @@ void nr_destroy_socket(struct sock *sk)
kfree_skb(skb);
}
- if (atomic_read(&sk->wmem_alloc) != 0 || atomic_read(&sk->rmem_alloc) != 0) {
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + 10 * HZ;
- sk->timer.function = nr_destroy_timer;
- sk->timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ sk->sk_timer.function = nr_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
} else
sk_free(sk);
}
@@ -407,10 +408,10 @@ static int nr_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
return 0;
}
@@ -433,7 +434,7 @@ static int nr_create(struct socket *sock, int protocol)
sock_init_data(sock, sk);
sock->ops = &nr_proto_ops;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
@@ -462,7 +463,7 @@ static struct sock *nr_make_new(struct sock *osk)
struct sock *sk;
nr_cb *nr, *onr;
- if (osk->type != SOCK_SEQPACKET)
+ if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
if ((sk = nr_alloc_sock()) == NULL)
@@ -472,16 +473,16 @@ static struct sock *nr_make_new(struct sock *osk)
sock_init_data(NULL, sk);
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
@@ -533,16 +534,16 @@ static int nr_release(struct socket *sock)
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr->state = NR_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
- sk->socket = NULL;
+ sk->sk_socket = NULL;
break;
default:
- sk->socket = NULL;
+ sk->sk_socket = NULL;
break;
}
@@ -559,7 +560,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct net_device *dev;
ax25_address *user, *source;
- if (sk->zapped == 0)
+ if (!sk->sk_zapped)
return -EINVAL;
if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct
@@ -601,7 +602,7 @@ full_sockaddr_ax25))
nr->device = dev;
nr_insert_socket(sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
SOCK_DEBUG(sk, "NET/ROM: socket is bound\n");
return 0;
}
@@ -615,20 +616,20 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
ax25_address *user, *source = NULL;
struct net_device *dev;
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
return 0; /* Connect completed during an ERESTARTSYS event */
}
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25))
@@ -637,8 +638,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
if (addr->sax25_family != AF_NETROM)
return -EINVAL;
- if (sk->zapped) { /* Must bind first - autobinding in this may or may not work */
- sk->zapped = 0;
+ if (sk->sk_zapped) { /* Must bind first - autobinding in this may or may not work */
+ sk->sk_zapped = 0;
if ((dev = nr_dev_first()) == NULL)
return -ENETUNREACH;
@@ -668,8 +669,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
circuit++;
/* Move to connecting socket, start sending Connect Requests */
- sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sock->state = SS_CONNECTING;
+ sk->sk_state = TCP_SYN_SENT;
nr_establish_data_link(sk);
@@ -678,21 +679,21 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
nr_start_heartbeat(sk);
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return -EINPROGRESS;
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
- if (sk->state == TCP_SYN_SENT) {
+ if (sk->sk_state == TCP_SYN_SENT) {
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (sk->state != TCP_SYN_SENT)
+ if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(tsk)) {
schedule();
@@ -701,10 +702,10 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
}
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
return sock_error(sk); /* Always set at this point */
}
@@ -727,12 +728,12 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
return -EINVAL;
lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
@@ -741,9 +742,9 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -759,16 +760,16 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
out:
@@ -783,7 +784,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
nr_cb *nr = nr_sk(sk);
if (peer != 0) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 1;
@@ -893,7 +894,8 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
user = (ax25_address *)(skb->data + 21);
- if (sk == NULL || sk->ack_backlog == sk->max_ack_backlog || (make = nr_make_new(sk)) == NULL) {
+ if (!sk || sk->sk_ack_backlog == sk->sk_max_ack_backlog ||
+ (make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
return 0;
}
@@ -901,7 +903,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
window = skb->data[20];
skb->sk = make;
- make->state = TCP_ESTABLISHED;
+ make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
nr_make = nr_sk(make);
@@ -941,18 +943,18 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
nr_make->vr = 0;
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
- sk->ack_backlog++;
- make->pair = sk;
+ sk->sk_ack_backlog++;
+ make->sk_pair = sk;
nr_insert_socket(make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
nr_start_heartbeat(make);
nr_start_idletimer(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
return 1;
}
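The nr_rx_frame() hunk above shows how these AX.25-family protocols queue an incoming connection: the pre-built child socket rides in skb->sk, the skb itself is parked on the listener's sk_receive_queue (which nr_accept() drains), sk_ack_backlog counts it, and sk_data_ready wakes the accept()er. A minimal sketch of that hand-off, with the renamed fields (queue_incoming() is a hypothetical helper, not part of this patch):

	/* Sketch only: the listener-side hand-off done inline above. */
	static void queue_incoming(struct sock *listener, struct sock *child,
				   struct sk_buff *skb)
	{
		listener->sk_ack_backlog++;
		child->sk_pair = listener;
		skb->sk = child;		/* accept() picks this up */
		skb_queue_head(&listener->sk_receive_queue, skb);
		if (!sock_flag(listener, SOCK_DEAD))
			listener->sk_data_ready(listener, skb->len);
	}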
@@ -972,10 +974,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR))
return -EINVAL;
- if (sk->zapped)
+ if (sk->sk_zapped)
return -EADDRNOTAVAIL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
@@ -992,7 +994,7 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
if (sax.sax25_family != AF_NETROM)
return -EINVAL;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sax.sax25_family = AF_NETROM;
sax.sax25_call = nr->dest_addr;
@@ -1038,7 +1040,7 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
memcpy_fromiovec(asmptr, msg->msg_iov, len);
SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n");
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
@@ -1062,7 +1064,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
* us! We do one quick check first though
*/
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
@@ -1099,7 +1101,7 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
return put_user(amount, (int *)arg);
@@ -1109,16 +1111,16 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (int *)arg);
}
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- return copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &sk->sk_stamp, sizeof(struct timeval)) ? -EFAULT : 0;
}
return -EINVAL;
@@ -1160,7 +1162,7 @@ static int nr_get_info(char *buffer, char **start, off_t offset, int length)
len += sprintf(buffer, "user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n");
- for (s = nr_list; s != NULL; s = s->next) {
+ for (s = nr_list; s; s = s->sk_next) {
nr_cb *nr = nr_sk(s);
if ((dev = nr->device) == NULL)
@@ -1194,9 +1196,9 @@ static int nr_get_info(char *buffer, char **start, off_t offset, int length)
nr->n2count,
nr->n2,
nr->window,
- atomic_read(&s->wmem_alloc),
- atomic_read(&s->rmem_alloc),
- s->socket != NULL ? SOCK_INODE(s->socket)->i_ino : 0L);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
pos = begin + len;
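The nr_connect() and nr_accept() hunks above both use the classic open-coded sleep loop that predates wait_event_interruptible(): put a wait-queue entry on sk->sk_sleep, set the task state before testing the condition, and schedule() until it flips. A minimal sketch of that idiom against the renamed fields (wait_for_nr_connect() is a hypothetical helper, not part of this patch):

	static int wait_for_nr_connect(struct sock *sk)
	{
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(sk->sk_sleep, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;			/* connected or refused */
			if (signal_pending(current)) {
				current->state = TASK_RUNNING;
				remove_wait_queue(sk->sk_sleep, &wait);
				return -ERESTARTSYS;
			}
			schedule();
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(sk->sk_sleep, &wait);
		/* sock_error() is always set if we never reached ESTABLISHED */
		return sk->sk_state == TCP_ESTABLISHED ? 0 : sock_error(sk);
	}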
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index aeff87c93ca2..669aea370be0 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -89,9 +89,9 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
nr->state = NR_STATE_3;
nr->n2count = 0;
nr->window = skb->data[20];
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
break;
}
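The state1 hunk above is the receiving half of that handshake: flip sk_state and kick anyone sleeping on the socket through the sk_state_change callback (sock_def_wakeup by default, which is what releases the loop in nr_connect()). Reduced to a sketch:

	/* Sketch only: promote a socket once the protocol handshake
	 * completes, waking blocked connect()/poll() callers. */
	static void nr_mark_established(struct sock *sk)
	{
		sk->sk_state = TCP_ESTABLISHED;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
	}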
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 9c04a14fd197..d36393da5825 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -65,12 +65,12 @@ void nr_output(struct sock *sk, struct sk_buff *skb)
if (skb->len > 0)
skbn->data[4] |= NR_MORE_FLAG;
- skb_queue_tail(&sk->write_queue, skbn); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
kfree_skb(skb);
} else {
- skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
nr_kick(sk);
@@ -135,7 +135,7 @@ void nr_kick(struct sock *sk)
if (nr->condition & NR_COND_PEER_RX_BUSY)
return;
- if (skb_peek(&sk->write_queue) == NULL)
+ if (!skb_peek(&sk->sk_write_queue))
return;
start = (skb_peek(&nr->ack_queue) == NULL) ? nr->va : nr->vs;
@@ -154,11 +154,11 @@ void nr_kick(struct sock *sk)
/*
* Dequeue the frame and copy it.
*/
- skb = skb_dequeue(&sk->write_queue);
+ skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
break;
}
@@ -176,7 +176,8 @@ void nr_kick(struct sock *sk)
*/
skb_queue_tail(&nr->ack_queue, skb);
- } while (nr->vs != end && (skb = skb_dequeue(&sk->write_queue)) != NULL);
+ } while (nr->vs != end &&
+ (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
nr->vl = nr->vr;
nr->condition &= ~NR_COND_ACK_PENDING;
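nr_kick() above implements the standard ARQ transmit step: each frame is cloned, the clone goes to the link, and the original is parked on nr->ack_queue until the peer acknowledges it (nr_requeue_frames() in nr_subr.c pushes it back onto sk_write_queue for retransmission). A reduced sketch of the loop, with transmit_frame() standing in for the NET/ROM send path:

	static void kick_sketch(struct sock *sk, struct sk_buff_head *ack_queue)
	{
		struct sk_buff *skb, *skbn;

		while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
				/* out of memory: put it back, retry next kick */
				skb_queue_head(&sk->sk_write_queue, skb);
				break;
			}
			transmit_frame(skbn);		/* hypothetical send hook */
			skb_queue_tail(ack_queue, skb);	/* held until acked */
		}
	}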
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 02198ba26616..6ba11ea5bcc2 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -36,7 +36,7 @@ void nr_clear_queues(struct sock *sk)
{
nr_cb *nr = nr_sk(sk);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&nr->ack_queue);
skb_queue_purge(&nr->reseq_queue);
skb_queue_purge(&nr->frag_queue);
@@ -75,7 +75,7 @@ void nr_requeue_frames(struct sock *sk)
while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb);
skb_prev = skb;
@@ -272,12 +272,12 @@ void nr_disconnect(struct sock *sk, int reason)
nr_sk(sk)->state = NR_STATE_0;
- sk->state = TCP_CLOSE;
- sk->err = reason;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = reason;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
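nr_disconnect() above (and rose_disconnect() further down) share one teardown shape: record the errno, mark the socket closed and half-shutdown, then deliver exactly one wakeup before flagging the socket dead. A sketch of the common sequence (proto_disconnect() is not a helper this patch introduces):

	static void proto_disconnect(struct sock *sk, int reason)
	{
		sk->sk_state     = TCP_CLOSE;
		sk->sk_err       = reason;	/* picked up by sock_error() */
		sk->sk_shutdown |= SEND_SHUTDOWN;
		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);	/* one last wakeup */
			sock_set_flag(sk, SOCK_DEAD);
		}
	}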
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 6c29f5bdc258..2b9271567068 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -92,13 +92,13 @@ void nr_start_idletimer(struct sock *sk)
void nr_start_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.function = &nr_heartbeat_expiry;
- sk->timer.expires = jiffies + 5 * HZ;
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.function = &nr_heartbeat_expiry;
+ sk->sk_timer.expires = jiffies + 5 * HZ;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void nr_stop_t1timer(struct sock *sk)
@@ -123,7 +123,7 @@ void nr_stop_idletimer(struct sock *sk)
void nr_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
int nr_t1timer_running(struct sock *sk)
@@ -142,7 +142,7 @@ static void nr_heartbeat_expiry(unsigned long param)
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
- (sk->state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
nr_destroy_socket(sk);
return;
}
@@ -152,7 +152,7 @@ static void nr_heartbeat_expiry(unsigned long param)
/*
* Check for the state of the receive buffer.
*/
- if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
+ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(nr->condition & NR_COND_OWN_RX_BUSY)) {
nr->condition &= ~NR_COND_OWN_RX_BUSY;
nr->condition &= ~NR_COND_ACK_PENDING;
@@ -206,12 +206,12 @@ static void nr_idletimer_expiry(unsigned long param)
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
- sk->state = TCP_CLOSE;
- sk->err = 0;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = 0;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
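sk_timer above still uses the 2.5-era timer interface, where the callback and its argument live in the timer_list itself and the socket is smuggled through the unsigned long data field. The heartbeat setup, reduced to the bare pattern (both functions are sketches, not code from this patch):

	static void heartbeat_expiry(unsigned long param)
	{
		struct sock *sk = (struct sock *)param;

		bh_lock_sock(sk);
		/* ... per-protocol housekeeping, as in nr_heartbeat_expiry() ... */
		bh_unlock_sock(sk);
	}

	static void start_heartbeat(struct sock *sk)
	{
		del_timer(&sk->sk_timer);
		sk->sk_timer.data     = (unsigned long)sk;
		sk->sk_timer.function = heartbeat_expiry;
		sk->sk_timer.expires  = jiffies + 5 * HZ;
		add_timer(&sk->sk_timer);
	}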
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e8a5beb5cc7f..32ad8ce11d05 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -191,12 +191,12 @@ struct packet_opt
#endif
};
-#define pkt_sk(__sk) ((struct packet_opt *)(__sk)->protinfo)
+#define pkt_sk(__sk) ((struct packet_opt *)(__sk)->sk_protinfo)
void packet_sock_destruct(struct sock *sk)
{
- BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
- BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
+ BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive packet socket: %p\n", sk);
@@ -356,7 +356,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
if (err)
goto out_free;
@@ -418,7 +418,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
structure, so that corresponding packet head
never delivered to user.
*/
- if (sk->type != SOCK_DGRAM)
+ if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb->mac.raw);
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
@@ -428,13 +428,14 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
snaplen = skb->len;
- if (sk->filter) {
+ if (sk->sk_filter) {
unsigned res = snaplen;
struct sk_filter *filter;
bh_lock_sock(sk);
- if ((filter = sk->filter) != NULL)
- res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
+ if ((filter = sk->sk_filter) != NULL)
+ res = sk_run_filter(skb, sk->sk_filter->insns,
+ sk->sk_filter->len);
bh_unlock_sock(sk);
if (res == 0)
@@ -443,7 +444,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
snaplen = res;
}
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
@@ -475,17 +477,17 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
skb_set_owner_r(skb, sk);
skb->dev = NULL;
- spin_lock(&sk->receive_queue.lock);
+ spin_lock(&sk->sk_receive_queue.lock);
po->stats.tp_packets++;
- __skb_queue_tail(&sk->receive_queue, skb);
- spin_unlock(&sk->receive_queue.lock);
- sk->data_ready(sk,skb->len);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk, skb->len);
return 0;
drop_n_acct:
- spin_lock(&sk->receive_queue.lock);
+ spin_lock(&sk->sk_receive_queue.lock);
po->stats.tp_drops++;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
@@ -518,7 +520,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
po = pkt_sk(sk);
if (dev->hard_header) {
- if (sk->type != SOCK_DGRAM)
+ if (sk->sk_type != SOCK_DGRAM)
skb_push(skb, skb->data - skb->mac.raw);
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
@@ -530,13 +532,14 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
snaplen = skb->len;
- if (sk->filter) {
+ if (sk->sk_filter) {
unsigned res = snaplen;
struct sk_filter *filter;
bh_lock_sock(sk);
- if ((filter = sk->filter) != NULL)
- res = sk_run_filter(skb, sk->filter->insns, sk->filter->len);
+ if ((filter = sk->sk_filter) != NULL)
+ res = sk_run_filter(skb, sk->sk_filter->insns,
+ sk->sk_filter->len);
bh_unlock_sock(sk);
if (res == 0)
@@ -545,7 +548,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
snaplen = res;
}
- if (sk->type == SOCK_DGRAM) {
+ if (sk->sk_type == SOCK_DGRAM) {
macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
} else {
unsigned maclen = skb->nh.raw - skb->data;
@@ -555,7 +558,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
if (macoff + snaplen > po->frame_size) {
if (po->copy_thresh &&
- atomic_read(&sk->rmem_alloc) + skb->truesize < (unsigned)sk->rcvbuf) {
+ atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
+ (unsigned)sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
@@ -572,7 +576,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
if (snaplen > skb->len-skb->data_len)
snaplen = skb->len-skb->data_len;
- spin_lock(&sk->receive_queue.lock);
+ spin_lock(&sk->sk_receive_queue.lock);
h = po->iovec[po->head];
if (h->tp_status)
@@ -581,11 +585,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
po->stats.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
- __skb_queue_tail(&sk->receive_queue, copy_skb);
+ __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
}
if (!po->stats.tp_drops)
status &= ~TP_STATUS_LOSING;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
memcpy((u8*)h + macoff, skb->data, snaplen);
@@ -621,7 +625,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
}
}
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
@@ -634,9 +638,9 @@ drop:
ring_is_full:
po->stats.tp_drops++;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
if (copy_skb)
kfree_skb(copy_skb);
goto drop_n_restore;
@@ -713,7 +717,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
@@ -755,9 +759,9 @@ static int packet_release(struct socket *sock)
return 0;
write_lock_bh(&packet_sklist_lock);
- for (skp = &packet_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &packet_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -799,7 +803,7 @@ static int packet_release(struct socket *sock)
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
sock_put(sk);
return 0;
@@ -843,9 +847,9 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, int protocol)
sock_hold(sk);
po->running = 1;
} else {
- sk->err = ENETDOWN;
+ sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->error_report(sk);
+ sk->sk_error_report(sk);
}
} else {
dev_add_pack(&po->prot_hook);
@@ -959,10 +963,10 @@ static int packet_create(struct socket *sock, int protocol)
if (!po)
goto out_free;
memset(po, 0, sizeof(*po));
- sk->family = PF_PACKET;
+ sk->sk_family = PF_PACKET;
po->num = protocol;
- sk->destruct = packet_sock_destruct;
+ sk->sk_destruct = packet_sock_destruct;
atomic_inc(&packet_socks_nr);
/*
@@ -985,7 +989,7 @@ static int packet_create(struct socket *sock, int protocol)
}
write_lock_bh(&packet_sklist_lock);
- sk->next = packet_sklist;
+ sk->sk_next = packet_sklist;
packet_sklist = sk;
sock_hold(sk);
write_unlock_bh(&packet_sklist_lock);
@@ -1342,10 +1346,10 @@ int packet_getsockopt(struct socket *sock, int level, int optname,
if (len > sizeof(struct tpacket_stats))
len = sizeof(struct tpacket_stats);
- spin_lock_bh(&sk->receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
st = po->stats;
memset(&po->stats, 0, sizeof(st));
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
st.tp_packets += st.tp_drops;
if (copy_to_user(optval, &st, len))
@@ -1368,7 +1372,7 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
struct net_device *dev = (struct net_device*)data;
read_lock(&packet_sklist_lock);
- for (sk = packet_sklist; sk; sk = sk->next) {
+ for (sk = packet_sklist; sk; sk = sk->sk_next) {
struct packet_opt *po = pkt_sk(sk);
switch (msg) {
@@ -1380,9 +1384,9 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
__dev_remove_pack(&po->prot_hook);
__sock_put(sk);
po->running = 0;
- sk->err = ENETDOWN;
+ sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
- sk->error_report(sk);
+ sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
po->ifindex = -1;
@@ -1424,7 +1428,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
switch(cmd) {
case SIOCOUTQ:
{
- int amount = atomic_read(&sk->wmem_alloc);
+ int amount = atomic_read(&sk->sk_wmem_alloc);
return put_user(amount, (int *)arg);
}
case SIOCINQ:
@@ -1432,17 +1436,17 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
struct sk_buff *skb;
int amount = 0;
- spin_lock_bh(&sk->receive_queue.lock);
- skb = skb_peek(&sk->receive_queue);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len;
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int *)arg);
}
case SIOCGSTAMP:
- if (sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- if (copy_to_user((void *)arg, &sk->stamp,
+ if (copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)))
return -EFAULT;
break;
@@ -1482,14 +1486,14 @@ unsigned int packet_poll(struct file * file, struct socket *sock, poll_table *wa
struct packet_opt *po = pkt_sk(sk);
unsigned int mask = datagram_poll(file, sock, wait);
- spin_lock_bh(&sk->receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->iovec) {
unsigned last = po->head ? po->head-1 : po->iovmax;
if (po->iovec[last]->tp_status)
mask |= POLLIN | POLLRDNORM;
}
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return mask;
}
@@ -1635,20 +1639,20 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
- spin_lock_bh(&sk->receive_queue.lock);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
pg_vec = XC(po->pg_vec, pg_vec);
io_vec = XC(po->iovec, io_vec);
po->iovmax = req->tp_frame_nr-1;
po->head = 0;
po->frame_size = req->tp_frame_size;
- spin_unlock_bh(&sk->receive_queue.lock);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
order = XC(po->pg_vec_order, order);
req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);
po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = po->iovec ? tpacket_rcv : packet_rcv;
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
#undef XC
if (atomic_read(&po->mapped))
printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
@@ -1778,17 +1782,17 @@ static int packet_read_proc(char *buffer, char **start, off_t offset,
read_lock(&packet_sklist_lock);
- for (s = packet_sklist; s; s = s->next) {
+ for (s = packet_sklist; s; s = s->sk_next) {
struct packet_opt *po = pkt_sk(s);
len+=sprintf(buffer+len,"%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu",
s,
- atomic_read(&s->refcnt),
- s->type,
+ atomic_read(&s->sk_refcnt),
+ s->sk_type,
ntohs(po->num),
po->ifindex,
po->running,
- atomic_read(&s->rmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
sock_i_uid(s),
sock_i_ino(s)
);
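Both packet_rcv() and tpacket_rcv() above run the attached socket filter the same way: read sk->sk_filter under bh_lock_sock, so a concurrent detach cannot pull it out from under the classifier, then let sk_run_filter() return the snap length, with 0 meaning drop. The pattern, isolated (run_rx_filter() is a hypothetical wrapper):

	static unsigned int run_rx_filter(struct sock *sk, struct sk_buff *skb,
					  unsigned int snaplen)
	{
		struct sk_filter *filter;

		bh_lock_sock(sk);
		if ((filter = sk->sk_filter) != NULL)
			snaplen = sk_run_filter(skb, filter->insns, filter->len);
		bh_unlock_sock(sk);

		return snaplen;		/* 0 tells the caller to drop */
	}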
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 9e8026d5e876..192e19476a53 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -155,19 +155,19 @@ static void rose_remove_socket(struct sock *sk)
spin_lock_bh(&rose_list_lock);
if ((s = rose_list) == sk) {
- rose_list = s->next;
+ rose_list = s->sk_next;
spin_unlock_bh(&rose_list_lock);
return;
}
- while (s != NULL && s->next != NULL) {
- if (s->next == sk) {
- s->next = sk->next;
+ while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
spin_unlock_bh(&rose_list_lock);
return;
}
- s = s->next;
+ s = s->sk_next;
}
spin_unlock_bh(&rose_list_lock);
}
@@ -181,7 +181,7 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (rose->neighbour == neigh) {
@@ -201,7 +201,7 @@ static void rose_kill_by_device(struct net_device *dev)
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (rose->device == dev) {
@@ -244,7 +244,7 @@ static void rose_insert_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
- sk->next = rose_list;
+ sk->sk_next = rose_list;
rose_list = sk;
spin_unlock_bh(&rose_list_lock);
}
@@ -258,23 +258,23 @@ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, call) &&
- !rose->source_ndigis && s->state == TCP_LISTEN) {
+ !rose->source_ndigis && s->sk_state == TCP_LISTEN) {
spin_unlock_bh(&rose_list_lock);
return s;
}
}
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, &null_ax25_address) &&
- s->state == TCP_LISTEN) {
+ s->sk_state == TCP_LISTEN) {
spin_unlock_bh(&rose_list_lock);
return s;
}
@@ -292,7 +292,7 @@ struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
struct sock *s;
spin_lock_bh(&rose_list_lock);
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if (rose->lci == lci && rose->neighbour == neigh) {
@@ -355,7 +355,7 @@ void rose_destroy_socket(struct sock *sk)
rose_clear_queues(sk); /* Flush the queues */
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
@@ -366,13 +366,14 @@ void rose_destroy_socket(struct sock *sk)
kfree_skb(skb);
}
- if (atomic_read(&sk->wmem_alloc) != 0 || atomic_read(&sk->rmem_alloc) != 0) {
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + 10 * HZ;
- sk->timer.function = rose_destroy_timer;
- sk->timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ sk->sk_timer.function = rose_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
} else
sk_free(sk);
}
@@ -504,15 +505,15 @@ static int rose_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
rose_cb *rose = rose_sk(sk);
rose->dest_ndigis = 0;
memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
memset(&rose->dest_call, 0, AX25_ADDR_LEN);
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
return 0;
}
@@ -541,7 +542,7 @@ static int rose_create(struct socket *sock, int protocol)
#endif
sock->ops = &rose_proto_ops;
- sk->protocol = protocol;
+ sk->sk_protocol = protocol;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
@@ -562,7 +563,7 @@ static struct sock *rose_make_new(struct sock *osk)
struct sock *sk;
rose_cb *rose, *orose;
- if (osk->type != SOCK_SEQPACKET)
+ if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
if ((sk = rose_alloc_sock()) == NULL)
@@ -578,16 +579,16 @@ static struct sock *rose_make_new(struct sock *osk)
rose->fraglen = 0;
#endif
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
@@ -635,9 +636,9 @@ static int rose_release(struct socket *sock)
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_start_t3timer(sk);
rose->state = ROSE_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
@@ -647,7 +648,7 @@ static int rose_release(struct socket *sock)
}
sock->sk = NULL;
- sk->socket = NULL; /* Not used, but we should do this. **/
+ sk->sk_socket = NULL; /* Not used, but we should do this. */
return 0;
}
@@ -661,7 +662,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
ax25_address *user, *source;
int n;
- if (sk->zapped == 0)
+ if (!sk->sk_zapped)
return -EINVAL;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
@@ -706,7 +707,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
rose_insert_socket(sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
SOCK_DEBUG(sk, "ROSE: socket is bound\n");
return 0;
}
@@ -721,20 +722,20 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
struct net_device *dev;
int n;
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
return 0; /* Connect completed during a ERESTARTSYS event */
}
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
@@ -762,8 +763,8 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
if (!rose->lci)
return -ENETUNREACH;
- if (sk->zapped) { /* Must bind first - autobinding in this may or may not work */
- sk->zapped = 0;
+ if (sk->sk_zapped) { /* Must bind first - autobinding in this may or may not work */
+ sk->sk_zapped = 0;
if ((dev = rose_dev_first()) == NULL)
return -ENETUNREACH;
@@ -795,7 +796,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
rose->state = ROSE_STATE_1;
@@ -806,21 +807,21 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
rose_start_t1timer(sk);
/* Now the loop */
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return -EINPROGRESS;
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
- if (sk->state == TCP_SYN_SENT) {
+ if (sk->sk_state == TCP_SYN_SENT) {
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (sk->state != TCP_SYN_SENT)
+ if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(tsk)) {
schedule();
@@ -829,10 +830,10 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
}
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
return sock_error(sk); /* Always set at this point */
}
@@ -855,12 +856,12 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
return -EINVAL;
lock_sock(sk);
- if (sk->type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
@@ -869,9 +870,9 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
- add_wait_queue(sk->sleep, &wait);
+ add_wait_queue(sk->sk_sleep, &wait);
for (;;) {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
@@ -887,17 +888,17 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
out:
@@ -915,7 +916,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
int n;
if (peer != 0) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
@@ -963,13 +964,14 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
/*
* We can't accept the Call Request.
*/
- if (sk == NULL || sk->ack_backlog == sk->max_ack_backlog || (make = rose_make_new(sk)) == NULL) {
+ if (!sk || sk->sk_ack_backlog == sk->sk_max_ack_backlog ||
+ (make = rose_make_new(sk)) == NULL) {
rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
return 0;
}
skb->sk = make;
- make->state = TCP_ESTABLISHED;
+ make->sk_state = TCP_ESTABLISHED;
make_rose = rose_sk(make);
make_rose->lci = lci;
@@ -1002,17 +1004,17 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
- sk->ack_backlog++;
- make->pair = sk;
+ sk->sk_ack_backlog++;
+ make->sk_pair = sk;
rose_insert_socket(make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
rose_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
return 1;
}
@@ -1032,10 +1034,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR))
return -EINVAL;
- if (sk->zapped)
+ if (sk->sk_zapped)
return -EADDRNOTAVAIL;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
@@ -1062,7 +1064,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
if (srose.srose_family != AF_ROSE)
return -EINVAL;
} else {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose.srose_family = AF_ROSE;
@@ -1121,7 +1123,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
@@ -1163,16 +1165,16 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
if (skb->len > 0)
skbn->data[2] |= M_BIT;
- skb_queue_tail(&sk->write_queue, skbn); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
skb->free = 1;
kfree_skb(skb, FREE_WRITE);
} else {
- skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */
+ skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
#else
- skb_queue_tail(&sk->write_queue, skb); /* Shove it onto the queue */
+ skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif
rose_kick(sk);
@@ -1196,7 +1198,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
@@ -1255,7 +1257,7 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case TIOCOUTQ: {
long amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
return put_user(amount, (unsigned int *)arg);
@@ -1265,16 +1267,17 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (unsigned int *)arg);
}
case SIOCGSTAMP:
if (sk != NULL) {
- if (sk->stamp.tv_sec == 0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
- return copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)) ? -EFAULT : 0;
}
return -EINVAL;
@@ -1359,7 +1362,7 @@ static int rose_get_info(char *buffer, char **start, off_t offset, int length)
len += sprintf(buffer, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
- for (s = rose_list; s != NULL; s = s->next) {
+ for (s = rose_list; s; s = s->sk_next) {
rose_cb *rose = rose_sk(s);
if ((dev = rose->device) == NULL)
@@ -1393,9 +1396,9 @@ static int rose_get_info(char *buffer, char **start, off_t offset, int length)
rose->hb / HZ,
ax25_display_timer(&rose->idletimer) / (60 * HZ),
rose->idle / (60 * HZ),
- atomic_read(&s->wmem_alloc),
- atomic_read(&s->rmem_alloc),
- s->socket != NULL ? SOCK_INODE(s->socket)->i_ino : 0L);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
pos = begin + len;
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index e60220284cd2..c6aa8fba6a38 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -53,9 +53,9 @@ static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int framety
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
break;
case ROSE_CLEAR_REQUEST:
@@ -183,7 +183,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
rose_stop_idletimer(sk);
break;
}
- if (atomic_read(&sk->rmem_alloc) > (sk->rcvbuf / 2))
+ if (atomic_read(&sk->sk_rmem_alloc) >
+ (sk->sk_rcvbuf / 2))
rose->condition |= ROSE_COND_OWN_RX_BUSY;
}
/*
diff --git a/net/rose/rose_out.c b/net/rose/rose_out.c
index 3583963560ed..5e5d4c4b459c 100644
--- a/net/rose/rose_out.c
+++ b/net/rose/rose_out.c
@@ -58,7 +58,7 @@ void rose_kick(struct sock *sk)
if (rose->condition & ROSE_COND_PEER_RX_BUSY)
return;
- if (skb_peek(&sk->write_queue) == NULL)
+ if (!skb_peek(&sk->sk_write_queue))
return;
start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;
@@ -74,11 +74,11 @@ void rose_kick(struct sock *sk)
* the window is full.
*/
- skb = skb_dequeue(&sk->write_queue);
+ skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
break;
}
@@ -96,7 +96,8 @@ void rose_kick(struct sock *sk)
*/
skb_queue_tail(&rose->ack_queue, skb);
- } while (rose->vs != end && (skb = skb_dequeue(&sk->write_queue)) != NULL);
+ } while (rose->vs != end &&
+ (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
rose->vl = rose->vr;
rose->condition &= ~ROSE_COND_ACK_PENDING;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 22af35f5cd28..88817bb1344f 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -907,11 +907,11 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
- sk->state = TCP_CLOSE;
- sk->err = 0;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = 0;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 924b56c55ae3..4c3f3c7f08a8 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -33,7 +33,7 @@
*/
void rose_clear_queues(struct sock *sk)
{
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&rose_sk(sk)->ack_queue);
}
@@ -70,7 +70,7 @@ void rose_requeue_frames(struct sock *sk)
*/
while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb);
skb_prev = skb;
@@ -506,12 +506,12 @@ void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic)
if (diagnostic != -1)
rose->diagnostic = diagnostic;
- sk->state = TCP_CLOSE;
- sk->err = reason;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = reason;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index 42bfc0f1b4f0..638bb1f58560 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -35,13 +35,13 @@ static void rose_idletimer_expiry(unsigned long);
void rose_start_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.function = &rose_heartbeat_expiry;
- sk->timer.expires = jiffies + 5 * HZ;
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.function = &rose_heartbeat_expiry;
+ sk->sk_timer.expires = jiffies + 5 * HZ;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void rose_start_t1timer(struct sock *sk)
@@ -113,7 +113,7 @@ void rose_start_idletimer(struct sock *sk)
void rose_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
void rose_stop_timer(struct sock *sk)
@@ -137,7 +137,7 @@ static void rose_heartbeat_expiry(unsigned long param)
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
- (sk->state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
rose_destroy_socket(sk);
return;
}
@@ -147,7 +147,7 @@ static void rose_heartbeat_expiry(unsigned long param)
/*
* Check for the state of the receive buffer.
*/
- if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
+ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(rose->condition & ROSE_COND_OWN_RX_BUSY)) {
rose->condition &= ~ROSE_COND_OWN_RX_BUSY;
rose->condition &= ~ROSE_COND_ACK_PENDING;
@@ -204,12 +204,12 @@ static void rose_idletimer_expiry(unsigned long param)
rose_start_t3timer(sk);
- sk->state = TCP_CLOSE;
- sk->err = 0;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = 0;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 13cffde90aaa..94c349128df6 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -508,7 +508,8 @@ static void sch_atm_dequeue(unsigned long data)
ATM_SKB(skb)->vcc = flow->vcc;
memcpy(skb_push(skb,flow->hdr_len),flow->hdr,
flow->hdr_len);
- atomic_add(skb->truesize,&flow->vcc->sk->wmem_alloc);
+ atomic_add(skb->truesize,
+ &flow->vcc->sk->sk_wmem_alloc);
/* atm.atm_options are already set by atm_tc_enqueue */
(void) flow->vcc->send(flow->vcc,skb);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index dd7eca949ce3..ac69c28813dc 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -177,10 +177,10 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
* RFC 6 - A SCTP receiver MUST be able to receive a minimum of
* 1500 bytes in one SCTP packet.
*/
- if (sk->rcvbuf < SCTP_DEFAULT_MINWINDOW)
+ if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW)
asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
else
- asoc->rwnd = sk->rcvbuf;
+ asoc->rwnd = sk->sk_rcvbuf;
asoc->a_rwnd = asoc->rwnd;
@@ -299,7 +299,7 @@ void sctp_association_free(struct sctp_association *asoc)
/* Decrement the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
/* Mark as dead, so other users can know this structure is
* going away.
@@ -857,7 +857,7 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
/* Decrement the backlog value for a TCP-style socket. */
if (sctp_style(oldsk, TCP))
- oldsk->ack_backlog--;
+ oldsk->sk_ack_backlog--;
/* Release references to the old endpoint and the sock. */
sctp_endpoint_put(assoc->ep);
@@ -1026,7 +1026,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
case SCTP_STATE_SHUTDOWN_RECEIVED:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >=
- min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu)))
+ min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu)))
return 1;
break;
default:
@@ -1109,7 +1109,7 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, int gfp)
* the endpoint.
*/
scope = sctp_scope(&asoc->peer.active_path->ipaddr);
- flags = (PF_INET6 == asoc->base.sk->family) ? SCTP_ADDR6_ALLOWED : 0;
+ flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
flags |= SCTP_ADDR6_PEERSUPP;
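The rwnd initialisation in the first hunk above clamps the advertised receive window to at least SCTP_DEFAULT_MINWINDOW, since rwnd is seeded from sk_rcvbuf. The same clamp written as a one-liner sketch, using the min_t/max_t helpers this file already relies on:

	/* Equivalent sketch of the rwnd seeding above. */
	asoc->rwnd   = max_t(__u32, sk->sk_rcvbuf, SCTP_DEFAULT_MINWINDOW);
	asoc->a_rwnd = asoc->rwnd;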
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index a7bba493e301..2f5c5084f8a8 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -151,12 +151,12 @@ struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* FIXME - Should the min and max window size be configurable
* sysctl parameters as opposed to be constants?
*/
- sk->rcvbuf = SCTP_DEFAULT_MAXWINDOW;
- sk->sndbuf = SCTP_DEFAULT_MAXWINDOW * 2;
+ sk->sk_rcvbuf = SCTP_DEFAULT_MAXWINDOW;
+ sk->sk_sndbuf = SCTP_DEFAULT_MAXWINDOW * 2;
/* Use SCTP specific send buffer space queues. */
- sk->write_space = sctp_write_space;
- sk->use_write_queue = 1;
+ sk->sk_write_space = sctp_write_space;
+ sk->sk_use_write_queue = 1;
/* Initialize the secret key used with cookie. */
get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
@@ -178,7 +178,7 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
/* Increment the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
}
/* Free the endpoint structure. Delay cleanup until
@@ -195,7 +195,7 @@ void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
- ep->base.sk->state = SCTP_SS_CLOSED;
+ ep->base.sk->sk_state = SCTP_SS_CLOSED;
/* Unlink this endpoint, so we can't find it again! */
sctp_unhash_endpoint(ep);
@@ -209,7 +209,7 @@ void sctp_endpoint_destroy(struct sctp_endpoint *ep)
sctp_bind_addr_free(&ep->base.bind_addr);
/* Remove and free the port */
- if (ep->base.sk->prev != NULL)
+ if (ep->base.sk->sk_prev)
sctp_put_port(ep->base.sk);
/* Give up our hold on the sock. */
diff --git a/net/sctp/input.c b/net/sctp/input.c
index e0e1d41acf89..f5483d72c288 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -447,10 +447,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
} else { /* Only an error on timeout */
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
out_unlock:
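sctp_v4_err() above (and sctp_v6_err() below) follow the usual ICMP error-delivery policy: if the application asked for errors (recverr) and the socket is not currently owned by user context, post a hard error and wake the app through sk_error_report; otherwise only record a soft error to be surfaced on a later timeout. As a sketch:

	/* Sketch of the hard-vs-soft error split used in both hunks. */
	static void icmp_err_to_sock(struct sock *sk, int err, int recverr)
	{
		if (!sock_owned_by_user(sk) && recverr) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* immediate wakeup */
		} else {
			sk->sk_err_soft = err;		/* noted on timeout */
		}
	}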
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 9d4583e4824e..7cb022c291c2 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -123,10 +123,10 @@ void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
icmpv6_err_convert(type, code, &err);
if (!sock_owned_by_user(sk) && np->recverr) {
- sk->err = err;
- sk->error_report(sk);
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
} else { /* Only an error on timeout */
- sk->err_soft = err;
+ sk->sk_err_soft = err;
}
out_unlock:
@@ -146,7 +146,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
memset(&fl, 0, sizeof(fl));
- fl.proto = sk->protocol;
+ fl.proto = sk->sk_protocol;
/* Fill in the dest address from the route entry passed with the skb
* and the source address from the transport.
@@ -159,7 +159,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
if (ipv6_addr_type(&fl.fl6_src) & IPV6_ADDR_LINKLOCAL)
fl.oif = transport->saddr.v6.sin6_scope_id;
else
- fl.oif = sk->bound_dev_if;
+ fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_sport = inet_sk(sk)->sport;
fl.fl_ip_dport = transport->ipaddr.v6.sin6_port;
@@ -366,13 +366,13 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
}
-/* Initialize sk->rcv_saddr from sctp_addr. */
+/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
}
-/* Initialize sk->daddr from sctp_addr. */
+/* Initialize sk->sk_daddr from sctp_addr. */
static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
inet6_sk(sk)->daddr = addr->v6.sin6_addr;
@@ -500,25 +500,25 @@ struct sock *sctp_v6_create_accept_sk(struct sock *sk,
struct sctp6_sock *newsctp6sk;
newsk = sk_alloc(PF_INET6, GFP_KERNEL, sizeof(struct sctp6_sock),
- sk->slab);
+ sk->sk_slab);
if (!newsk)
goto out;
sock_init_data(NULL, newsk);
sk_set_owner(newsk, THIS_MODULE);
- newsk->type = SOCK_STREAM;
+ newsk->sk_type = SOCK_STREAM;
- newsk->prot = sk->prot;
- newsk->no_check = sk->no_check;
- newsk->reuse = sk->reuse;
+ newsk->sk_prot = sk->sk_prot;
+ newsk->sk_no_check = sk->sk_no_check;
+ newsk->sk_reuse = sk->sk_reuse;
- newsk->destruct = inet_sock_destruct;
- newsk->zapped = 0;
- newsk->family = PF_INET6;
- newsk->protocol = IPPROTO_SCTP;
- newsk->backlog_rcv = sk->prot->backlog_rcv;
- newsk->shutdown = sk->shutdown;
+ newsk->sk_destruct = inet_sock_destruct;
+ newsk->sk_zapped = 0;
+ newsk->sk_family = PF_INET6;
+ newsk->sk_protocol = IPPROTO_SCTP;
+ newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+ newsk->sk_shutdown = sk->sk_shutdown;
newsctp6sk = (struct sctp6_sock *)newsk;
newsctp6sk->pinet6 = &newsctp6sk->inet6;
@@ -556,7 +556,7 @@ struct sock *sctp_v6_create_accept_sk(struct sock *sk,
atomic_inc(&inet_sock_nr);
#endif
- if (0 != newsk->prot->init(newsk)) {
+ if (newsk->sk_prot->init(newsk)) {
inet_sock_release(newsk);
newsk = NULL;
}
@@ -716,8 +716,8 @@ static int sctp_inet6_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
*/
if (addr->v6.sin6_scope_id)
- sk->bound_dev_if = addr->v6.sin6_scope_id;
- if (!sk->bound_dev_if)
+ sk->sk_bound_dev_if = addr->v6.sin6_scope_id;
+ if (!sk->sk_bound_dev_if)
return 0;
}
af = opt->pf->af;
@@ -746,8 +746,8 @@ static int sctp_inet6_send_verify(struct sctp_opt *opt, union sctp_addr *addr)
*/
if (addr->v6.sin6_scope_id)
- sk->bound_dev_if = addr->v6.sin6_scope_id;
- if (!sk->bound_dev_if)
+ sk->sk_bound_dev_if = addr->v6.sin6_scope_id;
+ if (!sk->sk_bound_dev_if)
return 0;
}
af = opt->pf->af;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 494ab8cec399..bc321f6019fd 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -138,7 +138,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
if (!packet->has_cookie_echo) {
error = sctp_packet_transmit(packet);
if (error < 0)
- chunk->skb->sk->err = -error;
+ chunk->skb->sk->sk_err = -error;
/* If we have an empty packet, then we can NOT ever
* return PMTU_FULL.
@@ -429,7 +429,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
/* Set up the IP options. */
/* BUG: not implemented
- * For v4 this all lives somewhere in sk->opt...
+ * For v4 this all lives somewhere in sk->sk_opt...
*/
/* Dump that on IP! */
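One detail worth noting in the sctp_packet_transmit_chunk() hunk above: kernel internals return negative errno values, while sk_err stores the positive errno that userspace eventually sees, hence the sign flip. The same convention recurs in outqueue.c and sm_sideeffect.c below. As a sketch:

	/* Sketch: propagate a negative internal return into sk_err. */
	static void record_xmit_error(struct sock *sk, int error)
	{
		if (error < 0)
			sk->sk_err = -error;	/* e.g. -ENOMEM becomes ENOMEM */
	}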
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 11c584c5e1bc..1bf3a7509eaa 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -492,7 +492,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
error = sctp_outq_flush(q, /* rtx_timeout */ 1);
if (error)
- q->asoc->base.sk->err = -error;
+ q->asoc->base.sk->sk_err = -error;
}
/*
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index c78af3861c85..1b65b4eea3fd 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -267,13 +267,13 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr;
}
-/* Initialize sk->rcv_saddr from sctp_addr. */
+/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr;
}
-/* Initialize sk->daddr from sctp_addr. */
+/* Initialize sk->sk_daddr from sctp_addr. */
static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr;
@@ -512,25 +512,25 @@ struct sock *sctp_v4_create_accept_sk(struct sock *sk,
struct inet_opt *newinet;
newsk = sk_alloc(PF_INET, GFP_KERNEL, sizeof(struct sctp_sock),
- sk->slab);
+ sk->sk_slab);
if (!newsk)
goto out;
sock_init_data(NULL, newsk);
sk_set_owner(newsk, THIS_MODULE);
- newsk->type = SOCK_STREAM;
+ newsk->sk_type = SOCK_STREAM;
- newsk->prot = sk->prot;
- newsk->no_check = sk->no_check;
- newsk->reuse = sk->reuse;
- newsk->shutdown = sk->shutdown;
+ newsk->sk_prot = sk->sk_prot;
+ newsk->sk_no_check = sk->sk_no_check;
+ newsk->sk_reuse = sk->sk_reuse;
+ newsk->sk_shutdown = sk->sk_shutdown;
- newsk->destruct = inet_sock_destruct;
- newsk->zapped = 0;
- newsk->family = PF_INET;
- newsk->protocol = IPPROTO_SCTP;
- newsk->backlog_rcv = sk->prot->backlog_rcv;
+ newsk->sk_destruct = inet_sock_destruct;
+ newsk->sk_zapped = 0;
+ newsk->sk_family = PF_INET;
+ newsk->sk_protocol = IPPROTO_SCTP;
+ newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
newinet = inet_sk(newsk);
@@ -555,7 +555,7 @@ struct sock *sctp_v4_create_accept_sk(struct sock *sk,
atomic_inc(&inet_sock_nr);
#endif
- if (0 != newsk->prot->init(newsk)) {
+ if (newsk->sk_prot->init(newsk)) {
inet_sock_release(newsk);
newsk = NULL;
}
@@ -601,7 +601,7 @@ int sctp_ctl_sock_init(void)
"SCTP: Failed to create the SCTP control socket.\n");
return err;
}
- sctp_ctl_socket->sk->allocation = GFP_ATOMIC;
+ sctp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
inet_sk(sctp_ctl_socket->sk)->uc_ttl = -1;
return 0;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 30b0a3008700..520260340f4b 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1895,7 +1895,7 @@ int sctp_process_param(struct sctp_association *asoc, union sctp_params param,
*/
switch (param.p->type) {
case SCTP_PARAM_IPV6_ADDRESS:
- if( PF_INET6 != asoc->base.sk->family)
+ if (PF_INET6 != asoc->base.sk->sk_family)
break;
/* Fall through. */
case SCTP_PARAM_IPV4_ADDRESS:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bdb55de62692..dc8d2983c2bf 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -229,7 +229,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
transport, GFP_ATOMIC);
if (error)
- asoc->base.sk->err = -error;
+ asoc->base.sk->sk_err = -error;
out_unlock:
sctp_bh_unlock_sock(asoc->base.sk);
@@ -269,7 +269,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
(void *)timeout_type, GFP_ATOMIC);
if (error)
- asoc->base.sk->err = -error;
+ asoc->base.sk->sk_err = -error;
out_unlock:
sctp_bh_unlock_sock(asoc->base.sk);
@@ -339,7 +339,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
transport, GFP_ATOMIC);
if (error)
- asoc->base.sk->err = -error;
+ asoc->base.sk->sk_err = -error;
out_unlock:
sctp_bh_unlock_sock(asoc->base.sk);
@@ -616,16 +616,16 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, struct sctp_association *as
asoc->state_timestamp = jiffies;
if (sctp_style(sk, TCP)) {
- /* Change the sk->state of a TCP-style socket that has
+ /* Change the sk->sk_state of a TCP-style socket that has
* successfully completed a connect() call.
*/
if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
- sk->state = SCTP_SS_ESTABLISHED;
+ sk->sk_state = SCTP_SS_ESTABLISHED;
/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
sctp_sstate(sk, ESTABLISHED))
- sk->shutdown |= RCV_SHUTDOWN;
+ sk->sk_shutdown |= RCV_SHUTDOWN;
}
if (sctp_state(asoc, ESTABLISHED) ||
@@ -644,7 +644,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, struct sctp_association *as
* notifications.
*/
if (!sctp_style(sk, UDP))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
}
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 426beaa6dc9a..52f4ee0c8d90 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -213,7 +213,8 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
* ABORT.
*/
if (!sctp_sstate(sk, LISTENING) ||
- (sctp_style(sk, TCP) && (sk->ack_backlog >= sk->max_ack_backlog)))
+ (sctp_style(sk, TCP) &&
+ (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)))
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Verify the INIT chunk before processing it. */
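The sctp_sf_do_5_1B_init() hunk above is the TCP-style accept-backlog gate: sk_ack_backlog counts associations that are established but not yet accept()ed (incremented in endpointola.c and decremented in associola.c above), and an incoming INIT is aborted once it reaches sk_max_ack_backlog. The admission test as a standalone predicate (a sketch, not a helper from this patch):

	static int accept_backlog_full(const struct sock *sk)
	{
		return sk->sk_ack_backlog >= sk->sk_max_ack_backlog;
	}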
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2e12db10227d..7d0d2bb912c2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -327,7 +327,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
* added.
*/
-/* Unprotected by locks. Call only with socket lock sk->lock held! See
+/* Unprotected by locks. Call only with socket lock sk->sk_lock held! See
* sctp_bindx() for a lock-protected call.
*/
@@ -537,8 +537,8 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
goto err_bindx_rem;
};
- /* FIXME - There is probably a need to check if sk->saddr and
- * sk->rcv_addr are currently set to one of the addresses to
+ /* FIXME - There is probably a need to check if sk->sk_saddr and
+ * sk->sk_rcv_addr are currently set to one of the addresses to
* be removed. This is something which needs to be looked into
* when we are fixing the outstanding issues with multi-homing
* socket routing and failover schemes. Refer to comments in
@@ -713,7 +713,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
printk("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
sctp_lock_sock(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
ep = sctp_sk(sk)->ep;
@@ -732,7 +732,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
sctp_association_free(asoc);
} else if (sock_flag(sk, SOCK_LINGER) &&
- !sk->lingertime)
+ !sk->sk_lingertime)
sctp_primitive_ABORT(asoc, NULL);
else
sctp_primitive_SHUTDOWN(asoc, NULL);
@@ -741,7 +741,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
}
/* Clean up any skbs sitting on the receive queue. */
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sctp_sk(sk)->pd_lobby);
/* On a TCP-style socket, block for at most linger_time if set. */
@@ -1073,7 +1073,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* API 7.1.7, the sndbuf size per association bounds the
* maximum size of data that can be sent in a single send call.
*/
- if (msg_len > sk->sndbuf) {
+ if (msg_len > sk->sk_sndbuf) {
err = -EMSGSIZE;
goto out_free;
}
@@ -1296,7 +1296,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
sctp_ulpevent_read_sndrcvinfo(event, msg);
#if 0
/* FIXME: we should be calling IP/IPv6 layers. */
- if (sk->protinfo.af_inet.cmsg_flags)
+ if (sk->sk_protinfo.af_inet.cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
@@ -1311,7 +1311,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
if (flags & MSG_PEEK)
goto out_free;
sctp_skb_pull(skb, copied);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
/* When only partial message is copied to the user, increase
* rwnd by that amount. If all the data in the skb is read,
@@ -1819,7 +1819,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
af = sctp_get_af_specific(to.sa.sa_family);
af->to_sk_daddr(&to, sk);
- timeo = sock_sndtimeo(sk, sk->socket->file->f_flags & O_NONBLOCK);
+ timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
err = sctp_wait_for_connect(asoc, &timeo);
out_unlock:
@@ -1865,7 +1865,7 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
goto out;
}
- timeo = sock_rcvtimeo(sk, sk->socket->file->f_flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
error = sctp_wait_for_accept(sk, timeo);
if (error)
@@ -1916,7 +1916,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp = sctp_sk(sk);
/* Initialize the SCTP per socket area. */
- switch (sk->type) {
+ switch (sk->sk_type) {
case SOCK_SEQPACKET:
sp->type = SCTP_SOCKET_UDP;
break;
@@ -1988,7 +1988,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* User specified fragmentation limit. */
sp->user_frag = 0;
- sp->pf = sctp_get_pf_specific(sk->family);
+ sp->pf = sctp_get_pf_specific(sk->sk_family);
/* Control variables for partial data delivery. */
sp->pd_mode = 0;
@@ -2184,7 +2184,7 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
return -EINVAL;
/* Create a new socket. */
- err = sock_create(sk->family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
+ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
if (err < 0)
return err;
@@ -2814,12 +2814,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* used by other socket (pp->sk != NULL); that other
* socket is going to be sk2.
*/
- int sk_reuse = sk->reuse;
+ int reuse = sk->sk_reuse;
struct sock *sk2 = pp->sk;
SCTP_DEBUG_PRINTK("sctp_get_port() found a "
"possible match\n");
- if (pp->fastreuse != 0 && sk->reuse != 0)
+ if (pp->fastreuse && sk->sk_reuse)
goto success;
/* Run through the list of sockets bound to the port
@@ -2832,11 +2832,11 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* that this port/socket (sk) combination are already
* in an endpoint.
*/
- for ( ; sk2 != NULL; sk2 = sk2->bind_next) {
+ for (; sk2; sk2 = sk2->sk_bind_next) {
struct sctp_endpoint *ep2;
ep2 = sctp_sk(sk2)->ep;
- if (sk_reuse && sk2->reuse)
+ if (reuse && sk2->sk_reuse)
continue;
if (sctp_bind_addr_match(&ep2->base.bind_addr, addr,
@@ -2860,12 +2860,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
goto fail_unlock;
/* In either case (hit or miss), make sure fastreuse is 1 only
- * if sk->reuse is too (that is, if the caller requested
+ * if sk->sk_reuse is too (that is, if the caller requested
* SO_REUSEADDR on this socket -sk-).
*/
if (!pp->sk)
- pp->fastreuse = sk->reuse ? 1 : 0;
- else if (pp->fastreuse && sk->reuse == 0)
+ pp->fastreuse = sk->sk_reuse ? 1 : 0;
+ else if (pp->fastreuse && !sk->sk_reuse)
pp->fastreuse = 0;
/* We are set, so fill up all the data in the hash table
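For reference, the bookkeeping above keeps one invariant on the bind bucket: pp->fastreuse stays 1 only while every socket hashed to the port has SO_REUSEADDR set, which lets later binds short-circuit the conflict scan. A minimal sketch of that invariant, with a simplified stand-in for sctp_bind_bucket_t (struct and function names here are hypothetical):

	/* Sketch only: simplified stand-in for the bucket bookkeeping above. */
	struct bucket { int fastreuse; int nsocks; };

	static void bucket_track_reuse(struct bucket *pp, int sk_reuse)
	{
		if (pp->nsocks == 0)
			/* first socket on the port decides the fast path */
			pp->fastreuse = sk_reuse ? 1 : 0;
		else if (pp->fastreuse && !sk_reuse)
			/* one non-SO_REUSEADDR socket disables it for good */
			pp->fastreuse = 0;
		pp->nsocks++;
	}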
@@ -2874,12 +2874,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
*/
success:
inet_sk(sk)->num = snum;
- if (sk->prev == NULL) {
- if ((sk->bind_next = pp->sk) != NULL)
- pp->sk->bind_pprev = &sk->bind_next;
+ if (!sk->sk_prev) {
+ if ((sk->sk_bind_next = pp->sk) != NULL)
+ pp->sk->sk_bind_pprev = &sk->sk_bind_next;
pp->sk = sk;
- sk->bind_pprev = &pp->sk;
- sk->prev = (struct sock *) pp;
+ sk->sk_bind_pprev = &pp->sk;
+ sk->sk_prev = (struct sock *) pp;
}
ret = 0;
@@ -2907,7 +2907,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
af->from_sk(&addr, sk);
addr.v4.sin_port = htons(snum);
- /* Note: sk->num gets filled in if ephemeral port request. */
+ /* Note: sk->sk_num gets filled in if ephemeral port request. */
ret = sctp_get_port_local(sk, &addr);
return (ret ? 1 : 0);
@@ -2948,7 +2948,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
if (sctp_autobind(sk))
return -EAGAIN;
}
- sk->state = SCTP_SS_LISTENING;
+ sk->sk_state = SCTP_SS_LISTENING;
sctp_hash_endpoint(ep);
return 0;
}
@@ -2981,8 +2981,8 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
if (sctp_autobind(sk))
return -EAGAIN;
}
- sk->state = SCTP_SS_LISTENING;
- sk->max_ack_backlog = backlog;
+ sk->sk_state = SCTP_SS_LISTENING;
+ sk->sk_max_ack_backlog = backlog;
sctp_hash_endpoint(ep);
return 0;
}
@@ -3056,7 +3056,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sctp_opt *sp = sctp_sk(sk);
unsigned int mask;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
@@ -3068,14 +3068,14 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
mask = 0;
/* Are there any exceptional events? */
- if (sk->err || !skb_queue_empty(&sk->error_queue))
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* Is it readable? Reconsider this code with TCP-style support. */
- if (!skb_queue_empty(&sk->receive_queue) ||
- (sk->shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
/* The association is either gone or not ready. */
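The mask construction above follows the usual poll pattern; a minimal sketch of just the three checks, assuming only the sk_* fields introduced by this patch:

	static unsigned int poll_mask(struct sock *sk)
	{
		unsigned int mask = 0;

		if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
			mask |= POLLERR;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			mask |= POLLHUP;	/* both halves closed */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}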
@@ -3086,7 +3086,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sctp_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
/*
* Since the socket is not locked, the buffer
* might be made available after the writeable check and
@@ -3133,11 +3133,11 @@ static __inline__ void __sctp_put_port(struct sock *sk)
sctp_bind_bucket_t *pp;
sctp_spin_lock(&head->lock);
- pp = (sctp_bind_bucket_t *) sk->prev;
- if (sk->bind_next)
- sk->bind_next->bind_pprev = sk->bind_pprev;
- *(sk->bind_pprev) = sk->bind_next;
- sk->prev = NULL;
+ pp = (sctp_bind_bucket_t *)sk->sk_prev;
+ if (sk->sk_bind_next)
+ sk->sk_bind_next->sk_bind_pprev = sk->sk_bind_pprev;
+ *(sk->sk_bind_pprev) = sk->sk_bind_next;
+ sk->sk_prev = NULL;
inet_sk(sk)->num = 0;
if (pp->sk) {
if (pp->next)
@@ -3299,18 +3299,18 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
int error;
DEFINE_WAIT(wait);
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
if (error)
goto out;
- if (!skb_queue_empty(&sk->receive_queue))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
goto ready;
/* Socket shut down? */
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
goto out;
/* Sequenced packets can come disconnected. If so we report the
@@ -3336,14 +3336,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
sctp_lock_sock(sk);
ready:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return 0;
interrupted:
error = sock_intr_errno(*timeo_p);
out:
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
*err = error;
return error;
}
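The functions above all follow the same prepare_to_wait()/finish_wait() idiom: queue on the waitqueue first, then test the condition, so a wakeup that fires between the test and the sleep cannot be lost. A condensed sketch of the idiom (wait_for() and its cond callback are hypothetical names):

	static int wait_for(struct sock *sk, int (*cond)(struct sock *),
			    long timeo)
	{
		DEFINE_WAIT(wait);
		int err = 0;

		for (;;) {
			prepare_to_wait(sk->sk_sleep, &wait,
					TASK_INTERRUPTIBLE);
			if (cond(sk))
				break;		/* condition met, done */
			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				break;
			}
			timeo = schedule_timeout(timeo); /* actually sleep */
		}
		finish_wait(sk->sk_sleep, &wait);
		return err;
	}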
@@ -3359,7 +3359,7 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
struct sk_buff *skb;
long timeo;
- /* Caller is allowed not to check sk->err before calling. */
+ /* Caller is allowed not to check sk->sk_err before calling. */
error = sock_error(sk);
if (error)
goto no_packet;
@@ -3380,21 +3380,21 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
if (flags & MSG_PEEK) {
unsigned long cpu_flags;
- sctp_spin_lock_irqsave(&sk->receive_queue.lock,
+ sctp_spin_lock_irqsave(&sk->sk_receive_queue.lock,
cpu_flags);
- skb = skb_peek(&sk->receive_queue);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
atomic_inc(&skb->users);
- sctp_spin_unlock_irqrestore(&sk->receive_queue.lock,
+ sctp_spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
cpu_flags);
} else {
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
}
if (skb)
return skb;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
/* User doesn't want to wait. */
@@ -3437,7 +3437,7 @@ static inline int sctp_wspace(struct sctp_association *asoc)
struct sock *sk = asoc->base.sk;
int amt = 0;
- amt = sk->sndbuf - asoc->sndbuf_used;
+ amt = sk->sk_sndbuf - asoc->sndbuf_used;
if (amt < 0)
amt = 0;
return amt;
@@ -3465,29 +3465,29 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
- sk->wmem_queued += SCTP_DATA_SNDSIZE(chunk);
+ sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
- struct socket *sock = sk->socket;
+ struct socket *sock = sk->sk_socket;
if ((sctp_wspace(asoc) > 0) && sock) {
if (waitqueue_active(&asoc->wait))
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
if (sock->fasync_list &&
- !(sk->shutdown & SEND_SHUTDOWN))
+ !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, 2, POLL_OUT);
}
}
@@ -3508,7 +3508,7 @@ static void sctp_wfree(struct sk_buff *skb)
asoc = chunk->asoc;
sk = asoc->base.sk;
asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk);
- sk->wmem_queued -= SCTP_DATA_SNDSIZE(chunk);
+ sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk);
__sctp_write_space(asoc);
sctp_association_put(asoc);
@@ -3535,7 +3535,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
- if (sk->err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
+ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
@@ -3602,7 +3602,7 @@ static int sctp_writeable(struct sock *sk)
{
int amt = 0;
- amt = sk->sndbuf - sk->wmem_queued;
+ amt = sk->sk_sndbuf - sk->sk_wmem_queued;
if (amt < 0)
amt = 0;
return amt;
@@ -3629,9 +3629,9 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
- if (sk->err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
+ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
@@ -3681,7 +3681,8 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
for (;;) {
- prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
sctp_release_sock(sk);
@@ -3706,7 +3707,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
break;
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
return err;
}
@@ -3716,7 +3717,7 @@ void sctp_wait_for_close(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
sctp_release_sock(sk);
@@ -3724,7 +3725,7 @@ void sctp_wait_for_close(struct sock *sk, long timeout)
sctp_lock_sock(sk);
} while (!signal_pending(current) && timeout);
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
@@ -3743,8 +3744,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
*/
- newsk->sndbuf = oldsk->sndbuf;
- newsk->rcvbuf = oldsk->rcvbuf;
+ newsk->sk_sndbuf = oldsk->sk_sndbuf;
+ newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
*newsp = *oldsp;
/* Restore the ep value that was overwritten with the above structure
@@ -3756,11 +3757,11 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
- sctp_skb_for_each(skb, &oldsk->receive_queue, tmp) {
+ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, skb->list);
- __skb_queue_tail(&newsk->receive_queue, skb);
+ __skb_queue_tail(&newsk->sk_receive_queue, skb);
}
}
@@ -3780,7 +3781,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
if (assoc->ulpq.pd_mode) {
queue = &newsp->pd_lobby;
} else
- queue = &newsk->receive_queue;
+ queue = &newsk->sk_receive_queue;
/* Walk through the pd_lobby, looking for skbs that
* need to be moved to the new socket.
@@ -3814,9 +3815,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* is called, set RCV_SHUTDOWN flag.
*/
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
- newsk->shutdown |= RCV_SHUTDOWN;
+ newsk->sk_shutdown |= RCV_SHUTDOWN;
- newsk->state = SCTP_SS_ESTABLISHED;
+ newsk->sk_state = SCTP_SS_ESTABLISHED;
}
/* This proto struct describes the ULP interface for SCTP. */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 63fe8a3b8d2d..2fa0743eaaab 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -245,7 +245,7 @@ void sctp_transport_route(struct sctp_transport *transport,
if (dst) {
transport->pmtu = dst_pmtu(dst);
- /* Initialize sk->rcv_saddr, if the transport is the
+ /* Initialize sk->sk_rcv_saddr, if the transport is the
* association's active path for getsockname().
*/
if (asoc && (transport == asoc->peer.active_path))
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 4c85dc95a12a..8a11bc28bd38 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -163,7 +163,7 @@ int sctp_clear_pd(struct sock *sk)
sp->pd_mode = 0;
if (!skb_queue_empty(&sp->pd_lobby)) {
struct list_head *list;
- sctp_skb_list_tail(&sp->pd_lobby, &sk->receive_queue);
+ sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
INIT_LIST_HEAD(list);
return 1;
@@ -189,7 +189,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
/* If the socket is just going to throw this away, do not
* even try to deliver it.
*/
- if (sock_flag(sk, SOCK_DEAD) || (sk->shutdown & RCV_SHUTDOWN))
+ if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
goto out_free;
/* Check if the user wishes to receive this event. */
@@ -202,13 +202,13 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
*/
if (!sctp_sk(sk)->pd_mode) {
- queue = &sk->receive_queue;
+ queue = &sk->sk_receive_queue;
} else if (ulpq->pd_mode) {
if (event->msg_flags & MSG_NOTIFICATION)
queue = &sctp_sk(sk)->pd_lobby;
else {
clear_pd = event->msg_flags & MSG_EOR;
- queue = &sk->receive_queue;
+ queue = &sk->sk_receive_queue;
}
} else
queue = &sctp_sk(sk)->pd_lobby;
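The branch above decides where an event lands during partial delivery: notifications are parked in the pd_lobby so they cannot interleave with a partially delivered message, data goes straight to sk_receive_queue, and MSG_EOR on a data event ends partial-delivery mode. The same decision as a standalone sketch (pick_queue() is a hypothetical helper):

	static struct sk_buff_head *pick_queue(struct sock *sk,
					       struct sctp_ulpq *ulpq,
					       int msg_flags, int *clear_pd)
	{
		if (!sctp_sk(sk)->pd_mode)
			return &sk->sk_receive_queue;	/* normal delivery */
		if (ulpq->pd_mode) {
			if (msg_flags & MSG_NOTIFICATION)
				return &sctp_sk(sk)->pd_lobby;	/* park it */
			*clear_pd = msg_flags & MSG_EOR; /* last fragment? */
			return &sk->sk_receive_queue;
		}
		return &sctp_sk(sk)->pd_lobby;
	}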
@@ -229,8 +229,8 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
if (clear_pd)
sctp_ulpq_clear_pd(ulpq);
- if (queue == &sk->receive_queue)
- sk->data_ready(sk, 0);
+ if (queue == &sk->sk_receive_queue)
+ sk->sk_data_ready(sk, 0);
return 1;
out_free:
@@ -773,7 +773,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
freed = 0;
- if (skb_queue_empty(&asoc->base.sk->receive_queue)) {
+ if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
freed = sctp_ulpq_renege_order(ulpq, needed);
if (freed < needed) {
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
@@ -812,9 +812,9 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
SCTP_PARTIAL_DELIVERY_ABORTED,
gfp);
if (ev)
- __skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
+ __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
/* If there is data waiting, send it up the socket now. */
if (sctp_ulpq_clear_pd(ulpq) || ev)
- sk->data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
}
diff --git a/net/socket.c b/net/socket.c
index 76d29acb146a..6d79cd4b31ec 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -889,11 +889,11 @@ int sock_close(struct inode *inode, struct file *filp)
*
* 1. fasync_list is modified only under process context socket lock
* i.e. under semaphore.
- * 2. fasync_list is used under read_lock(&sk->callback_lock)
+ * 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
* or under socket lock.
* 3. fasync_list can be used from softirq context, so that
* modification under socket lock have to be enhanced with
- * write_lock_bh(&sk->callback_lock).
+ * write_lock_bh(&sk->sk_callback_lock).
* --ANK (990710)
*/
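Concretely, the rule above means any writer touching fasync_list must hold both the socket lock (against other writers) and write_lock_bh(&sk->sk_callback_lock) (against softirq readers). A minimal sketch of the add path, mirroring the code below (fasync_list_add() is a hypothetical name):

	static void fasync_list_add(struct sock *sk, struct socket *sock,
				    struct fasync_struct *fna)
	{
		/* caller holds the socket lock; _bh keeps softirq readers out */
		write_lock_bh(&sk->sk_callback_lock);
		fna->fa_next = sock->fasync_list;
		sock->fasync_list = fna;
		write_unlock_bh(&sk->sk_callback_lock);
	}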
@@ -930,9 +930,9 @@ static int sock_fasync(int fd, struct file *filp, int on)
{
if(fa!=NULL)
{
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
fa->fa_fd=fd;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
kfree(fna);
goto out;
@@ -941,17 +941,17 @@ static int sock_fasync(int fd, struct file *filp, int on)
fna->fa_fd=fd;
fna->magic=FASYNC_MAGIC;
fna->fa_next=sock->fasync_list;
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
sock->fasync_list=fna;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
}
else
{
if (fa!=NULL)
{
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
*prev=fa->fa_next;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
kfree(fa);
}
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f2fa1ab4ce25..f1230fdafa93 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -496,9 +496,9 @@ svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
* DaveM said I could!
*/
lock_sock(sock->sk);
- sock->sk->sndbuf = snd * 2;
- sock->sk->rcvbuf = rcv * 2;
- sock->sk->userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
+ sock->sk->sk_sndbuf = snd * 2;
+ sock->sk->sk_rcvbuf = rcv * 2;
+ sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
release_sock(sock->sk);
#endif
}
@@ -508,7 +508,7 @@ svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
static void
svc_udp_data_ready(struct sock *sk, int count)
{
- struct svc_sock *svsk = (struct svc_sock *)(sk->user_data);
+ struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
if (!svsk)
goto out;
@@ -517,8 +517,8 @@ svc_udp_data_ready(struct sock *sk, int count)
set_bit(SK_DATA, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
}
/*
@@ -527,7 +527,7 @@ svc_udp_data_ready(struct sock *sk, int count)
static void
svc_write_space(struct sock *sk)
{
- struct svc_sock *svsk = (struct svc_sock *)(sk->user_data);
+ struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
if (svsk) {
dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
@@ -535,10 +535,10 @@ svc_write_space(struct sock *sk)
svc_sock_enqueue(svsk);
}
- if (sk->sleep && waitqueue_active(sk->sleep)) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
printk(KERN_WARNING "RPC svc_write_space: some sleeping on %p\n",
svsk);
- wake_up_interruptible(sk->sleep);
+ wake_up_interruptible(sk->sk_sleep);
}
}
@@ -589,7 +589,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_addr.sin_port = skb->h.uh->source;
rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
- svsk->sk_sk->stamp = skb->stamp;
+ svsk->sk_sk->sk_stamp = skb->stamp;
if (skb_is_nonlinear(skb)) {
/* we have to copy */
@@ -652,8 +652,8 @@ svc_udp_sendto(struct svc_rqst *rqstp)
static void
svc_udp_init(struct svc_sock *svsk)
{
- svsk->sk_sk->data_ready = svc_udp_data_ready;
- svsk->sk_sk->write_space = svc_write_space;
+ svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
+ svsk->sk_sk->sk_write_space = svc_write_space;
svsk->sk_recvfrom = svc_udp_recvfrom;
svsk->sk_sendto = svc_udp_sendto;
@@ -679,21 +679,21 @@ svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
struct svc_sock *svsk;
dprintk("svc: socket %p TCP (listen) state change %d\n",
- sk, sk->state);
+ sk, sk->sk_state);
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
/* Aborted connection, SYN_RECV or whatever... */
goto out;
}
- if (!(svsk = (struct svc_sock *) sk->user_data)) {
+ if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
printk("svc: socket %p: no user data\n", sk);
goto out;
}
set_bit(SK_CONN, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
}
/*
@@ -705,17 +705,17 @@ svc_tcp_state_change(struct sock *sk)
struct svc_sock *svsk;
dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
- sk, sk->state, sk->user_data);
+ sk, sk->sk_state, sk->sk_user_data);
- if (!(svsk = (struct svc_sock *) sk->user_data)) {
+ if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
printk("svc: socket %p: no user data\n", sk);
goto out;
}
set_bit(SK_CLOSE, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
}
static void
@@ -724,14 +724,14 @@ svc_tcp_data_ready(struct sock *sk, int count)
struct svc_sock * svsk;
dprintk("svc: socket %p TCP data ready (svsk %p)\n",
- sk, sk->user_data);
- if (!(svsk = (struct svc_sock *)(sk->user_data)))
+ sk, sk->sk_user_data);
+ if (!(svsk = (struct svc_sock *)(sk->sk_user_data)))
goto out;
set_bit(SK_DATA, &svsk->sk_flags);
svc_sock_enqueue(svsk);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
}
/*
@@ -797,7 +797,7 @@ svc_tcp_accept(struct svc_sock *svsk)
/* make sure that a write doesn't block forever when
* low on memory
*/
- newsock->sk->sndtimeo = HZ*30;
+ newsock->sk->sk_sndtimeo = HZ*30;
if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
goto failed;
@@ -1035,15 +1035,15 @@ svc_tcp_init(struct svc_sock *svsk)
svsk->sk_recvfrom = svc_tcp_recvfrom;
svsk->sk_sendto = svc_tcp_sendto;
- if (sk->state == TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
dprintk("setting up TCP socket for listening\n");
- sk->data_ready = svc_tcp_listen_data_ready;
+ sk->sk_data_ready = svc_tcp_listen_data_ready;
set_bit(SK_CONN, &svsk->sk_flags);
} else {
dprintk("setting up TCP socket for reading\n");
- sk->state_change = svc_tcp_state_change;
- sk->data_ready = svc_tcp_data_ready;
- sk->write_space = svc_write_space;
+ sk->sk_state_change = svc_tcp_state_change;
+ sk->sk_data_ready = svc_tcp_data_ready;
+ sk->sk_write_space = svc_write_space;
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -1290,7 +1290,7 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
/* Register socket with portmapper */
if (*errp >= 0 && pmap_register)
- *errp = svc_register(serv, inet->protocol,
+ *errp = svc_register(serv, inet->sk_protocol,
ntohs(inet_sk(inet)->sport));
if (*errp < 0) {
@@ -1299,12 +1299,12 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
}
set_bit(SK_BUSY, &svsk->sk_flags);
- inet->user_data = svsk;
+ inet->sk_user_data = svsk;
svsk->sk_sock = sock;
svsk->sk_sk = inet;
- svsk->sk_ostate = inet->state_change;
- svsk->sk_odata = inet->data_ready;
- svsk->sk_owspace = inet->write_space;
+ svsk->sk_ostate = inet->sk_state_change;
+ svsk->sk_odata = inet->sk_data_ready;
+ svsk->sk_owspace = inet->sk_write_space;
svsk->sk_server = serv;
svsk->sk_lastrecv = get_seconds();
INIT_LIST_HEAD(&svsk->sk_deferred);
@@ -1363,7 +1363,7 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
return error;
if (sin != NULL) {
- sock->sk->reuse = 1; /* allow address reuse */
+ sock->sk->sk_reuse = 1; /* allow address reuse */
error = sock->ops->bind(sock, (struct sockaddr *) sin,
sizeof(*sin));
if (error < 0)
@@ -1398,9 +1398,9 @@ svc_delete_socket(struct svc_sock *svsk)
serv = svsk->sk_server;
sk = svsk->sk_sk;
- sk->state_change = svsk->sk_ostate;
- sk->data_ready = svsk->sk_odata;
- sk->write_space = svsk->sk_owspace;
+ sk->sk_state_change = svsk->sk_ostate;
+ sk->sk_data_ready = svsk->sk_odata;
+ sk->sk_write_space = svsk->sk_owspace;
spin_lock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6bddd4b38250..0f78b8a8b29a 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -129,7 +129,7 @@ xprt_pktdump(char *msg, u32 *packet, unsigned int count)
static inline struct rpc_xprt *
xprt_from_sock(struct sock *sk)
{
- return (struct rpc_xprt *) sk->user_data;
+ return (struct rpc_xprt *) sk->sk_user_data;
}
/*
@@ -367,18 +367,18 @@ xprt_close(struct rpc_xprt *xprt)
if (!sk)
return;
- write_lock_bh(&sk->callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
xprt->inet = NULL;
xprt->sock = NULL;
- sk->user_data = NULL;
- sk->data_ready = xprt->old_data_ready;
- sk->state_change = xprt->old_state_change;
- sk->write_space = xprt->old_write_space;
- write_unlock_bh(&sk->callback_lock);
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = xprt->old_data_ready;
+ sk->sk_state_change = xprt->old_state_change;
+ sk->sk_write_space = xprt->old_write_space;
+ write_unlock_bh(&sk->sk_callback_lock);
xprt_disconnect(xprt);
- sk->no_check = 0;
+ sk->sk_no_check = 0;
sock_release(sock);
}
@@ -448,7 +448,7 @@ xprt_connect(struct rpc_task *task)
status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
sizeof(xprt->addr), O_NONBLOCK);
dprintk("RPC: %4d connect status %d connected %d sock state %d\n",
- task->tk_pid, -status, xprt_connected(xprt), inet->state);
+ task->tk_pid, -status, xprt_connected(xprt), inet->sk_state);
if (status >= 0)
return;
@@ -458,12 +458,13 @@ xprt_connect(struct rpc_task *task)
case -EALREADY:
/* Protect against TCP socket state changes */
lock_sock(inet);
- if (inet->state != TCP_ESTABLISHED) {
+ if (inet->sk_state != TCP_ESTABLISHED) {
dprintk("RPC: %4d waiting for connection\n",
task->tk_pid);
task->tk_timeout = RPC_CONNECT_TIMEOUT;
/* if the socket is already closing, delay briefly */
- if ((1 << inet->state) & ~(TCPF_SYN_SENT|TCPF_SYN_RECV))
+ if ((1 << inet->sk_state) &
+ ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status,
NULL);
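The (1 << inet->sk_state) test above relies on each TCPF_* flag being defined as 1 << TCP_*, so a whole set of states can be checked with a single AND; here it reads "any state other than SYN_SENT/SYN_RECV". As a sketch:

	/* Sketch: true for every TCP state except SYN_SENT and SYN_RECV. */
	static int past_handshake(int sk_state)
	{
		return (1 << sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV);
	}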
@@ -679,7 +680,7 @@ udp_data_ready(struct sock *sk, int len)
struct sk_buff *skb;
int err, repsize, copied;
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
dprintk("RPC: udp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk))) {
printk("RPC: udp_data_ready request not found!\n");
@@ -728,9 +729,9 @@ udp_data_ready(struct sock *sk, int len)
dropit:
skb_free_datagram(sk, skb);
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
- read_unlock(&sk->callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
}
/*
@@ -935,7 +936,7 @@ static void tcp_data_ready(struct sock *sk, int bytes)
struct rpc_xprt *xprt;
read_descriptor_t rd_desc;
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
dprintk("RPC: tcp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk))) {
printk("RPC: tcp_data_ready socket info not found!\n");
@@ -949,7 +950,7 @@ static void tcp_data_ready(struct sock *sk, int bytes)
rd_desc.count = 65536;
tcp_read_sock(sk, &rd_desc, tcp_data_recv);
out:
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
static void
@@ -957,15 +958,15 @@ tcp_state_change(struct sock *sk)
{
struct rpc_xprt *xprt;
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: tcp_state_change client %p...\n", xprt);
dprintk("RPC: state %x conn %d dead %d zapped %d\n",
- sk->state, xprt_connected(xprt),
- sock_flag(sk, SOCK_DEAD), sk->zapped);
+ sk->sk_state, xprt_connected(xprt),
+ sock_flag(sk, SOCK_DEAD), sk->sk_zapped);
- switch (sk->state) {
+ switch (sk->sk_state) {
case TCP_ESTABLISHED:
spin_lock_bh(&xprt->sock_lock);
if (!xprt_test_and_set_connected(xprt)) {
@@ -989,9 +990,9 @@ tcp_state_change(struct sock *sk)
break;
}
out:
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible_all(sk->sleep);
- read_unlock(&sk->callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
}
/*
@@ -1006,8 +1007,8 @@ xprt_write_space(struct sock *sk)
struct rpc_xprt *xprt;
struct socket *sock;
- read_lock(&sk->callback_lock);
- if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->socket))
+ read_lock(&sk->sk_callback_lock);
+ if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
goto out;
if (xprt->shutdown)
goto out;
@@ -1030,10 +1031,10 @@ xprt_write_space(struct sock *sk)
if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending)
rpc_wake_up_task(xprt->snd_task);
spin_unlock_bh(&xprt->sock_lock);
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
out:
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
/*
@@ -1465,28 +1466,28 @@ xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
if (xprt->inet)
return;
- write_lock_bh(&sk->callback_lock);
- sk->user_data = xprt;
- xprt->old_data_ready = sk->data_ready;
- xprt->old_state_change = sk->state_change;
- xprt->old_write_space = sk->write_space;
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = xprt;
+ xprt->old_data_ready = sk->sk_data_ready;
+ xprt->old_state_change = sk->sk_state_change;
+ xprt->old_write_space = sk->sk_write_space;
if (xprt->prot == IPPROTO_UDP) {
- sk->data_ready = udp_data_ready;
- sk->no_check = UDP_CSUM_NORCV;
+ sk->sk_data_ready = udp_data_ready;
+ sk->sk_no_check = UDP_CSUM_NORCV;
xprt_set_connected(xprt);
} else {
struct tcp_opt *tp = tcp_sk(sk);
tp->nonagle = 1; /* disable Nagle's algorithm */
- sk->data_ready = tcp_data_ready;
- sk->state_change = tcp_state_change;
+ sk->sk_data_ready = tcp_data_ready;
+ sk->sk_state_change = tcp_state_change;
xprt_clear_connected(xprt);
}
- sk->write_space = xprt_write_space;
+ sk->sk_write_space = xprt_write_space;
/* Reset to new socket */
xprt->sock = sock;
xprt->inet = sk;
- write_unlock_bh(&sk->callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
return;
}
@@ -1502,13 +1503,13 @@ xprt_sock_setbufsize(struct rpc_xprt *xprt)
if (xprt->stream)
return;
if (xprt->rcvsize) {
- sk->userlocks |= SOCK_RCVBUF_LOCK;
- sk->rcvbuf = xprt->rcvsize * RPC_MAXCONG * 2;
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ sk->sk_rcvbuf = xprt->rcvsize * RPC_MAXCONG * 2;
}
if (xprt->sndsize) {
- sk->userlocks |= SOCK_SNDBUF_LOCK;
- sk->sndbuf = xprt->sndsize * RPC_MAXCONG * 2;
- sk->write_space(sk);
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ sk->sk_sndbuf = xprt->sndsize * RPC_MAXCONG * 2;
+ sk->sk_write_space(sk);
}
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 725bdae8ff57..c5f058001647 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -145,7 +145,7 @@ static inline unsigned unix_hash_fold(unsigned hash)
return hash&(UNIX_HASH_SIZE-1);
}
-#define unix_peer(sk) ((sk)->pair)
+#define unix_peer(sk) ((sk)->sk_pair)
static inline int unix_our_peer(unix_socket *sk, unix_socket *osk)
{
@@ -215,15 +215,15 @@ static void __unix_remove_socket(unix_socket *sk)
unix_socket **list = u->list;
if (list) {
- if (sk->next)
- sk->next->prev = sk->prev;
- if (sk->prev)
- sk->prev->next = sk->next;
+ if (sk->sk_next)
+ sk->sk_next->sk_prev = sk->sk_prev;
+ if (sk->sk_prev)
+ sk->sk_prev->sk_next = sk->sk_next;
if (*list == sk)
- *list = sk->next;
+ *list = sk->sk_next;
u->list = NULL;
- sk->prev = NULL;
- sk->next = NULL;
+ sk->sk_prev = NULL;
+ sk->sk_next = NULL;
__sock_put(sk);
}
}
@@ -234,10 +234,10 @@ static void __unix_insert_socket(unix_socket **list, unix_socket *sk)
BUG_TRAP(!u->list);
u->list = list;
- sk->prev = NULL;
- sk->next = *list;
+ sk->sk_prev = NULL;
+ sk->sk_next = *list;
if (*list)
- (*list)->prev = sk;
+ (*list)->sk_prev = sk;
*list=sk;
sock_hold(sk);
}
@@ -261,7 +261,7 @@ static unix_socket *__unix_find_socket_byname(struct sockaddr_un *sunname,
{
unix_socket *s;
- for (s=unix_socket_table[hash^type]; s; s=s->next) {
+ for (s = unix_socket_table[hash ^ type]; s; s = s->sk_next) {
struct unix_sock *u = unix_sk(s);
if (u->addr->len == len &&
@@ -290,8 +290,8 @@ static unix_socket *unix_find_socket_byinode(struct inode *i)
unix_socket *s;
read_lock(&unix_table_lock);
- for (s=unix_socket_table[i->i_ino & (UNIX_HASH_SIZE-1)]; s; s=s->next)
- {
+ for (s = unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]; s;
+ s = s->sk_next) {
struct dentry *dentry = unix_sk(s)->dentry;
if(dentry && dentry->d_inode == i)
@@ -306,18 +306,18 @@ static unix_socket *unix_find_socket_byinode(struct inode *i)
static inline int unix_writable(struct sock *sk)
{
- return ((atomic_read(&sk->wmem_alloc)<<2) <= sk->sndbuf);
+ return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
static void unix_write_space(struct sock *sk)
{
- read_lock(&sk->callback_lock);
+ read_lock(&sk->sk_callback_lock);
if (unix_writable(sk)) {
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk, 2, POLL_OUT);
}
- read_unlock(&sk->callback_lock);
+ read_unlock(&sk->sk_callback_lock);
}
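unix_writable() above reports the socket writable while committed write memory is at most a quarter of sk_sndbuf, since shifting left by two multiplies by four. Restated as a sketch:

	static int writable(unsigned int wmem_alloc, unsigned int sndbuf)
	{
		/* wmem_alloc << 2 <= sndbuf  <=>  wmem_alloc <= sndbuf / 4 */
		return (wmem_alloc << 2) <= sndbuf;
	}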
/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -326,8 +326,8 @@ static void unix_write_space(struct sock *sk)
* may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
- if (skb_queue_len(&sk->receive_queue)) {
- skb_queue_purge(&sk->receive_queue);
+ if (skb_queue_len(&sk->sk_receive_queue)) {
+ skb_queue_purge(&sk->sk_receive_queue);
wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
/* If one link of bidirectional dgram pipe is disconnected,
@@ -335,8 +335,8 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
* when peer was not connected to us.
*/
if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
- other->err = ECONNRESET;
- other->error_report(other);
+ other->sk_err = ECONNRESET;
+ other->sk_error_report(other);
}
}
}
@@ -345,11 +345,11 @@ static void unix_sock_destructor(struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
- skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
- BUG_TRAP(atomic_read(&sk->wmem_alloc) == 0);
+ BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
BUG_TRAP(!u->list);
- BUG_TRAP(sk->socket==NULL);
+ BUG_TRAP(!sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive unix socket: %p\n", sk);
return;
@@ -378,13 +378,13 @@ static int unix_release_sock (unix_socket *sk, int embrion)
/* Clear state */
unix_state_wlock(sk);
sock_orphan(sk);
- sk->shutdown = SHUTDOWN_MASK;
+ sk->sk_shutdown = SHUTDOWN_MASK;
dentry = u->dentry;
u->dentry = NULL;
mnt = u->mnt;
u->mnt = NULL;
- state = sk->state;
- sk->state = TCP_CLOSE;
+ state = sk->sk_state;
+ sk->sk_state = TCP_CLOSE;
unix_state_wunlock(sk);
wake_up_interruptible_all(&u->peer_wait);
@@ -392,16 +392,17 @@ static int unix_release_sock (unix_socket *sk, int embrion)
skpair=unix_peer(sk);
if (skpair!=NULL) {
- if (sk->type==SOCK_STREAM) {
+ if (sk->sk_type == SOCK_STREAM) {
unix_state_wlock(skpair);
- skpair->shutdown=SHUTDOWN_MASK; /* No more writes*/
- if (!skb_queue_empty(&sk->receive_queue) || embrion)
- skpair->err = ECONNRESET;
+ /* No more writes */
+ skpair->sk_shutdown = SHUTDOWN_MASK;
+ if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ skpair->sk_err = ECONNRESET;
unix_state_wunlock(skpair);
- skpair->state_change(skpair);
- read_lock(&skpair->callback_lock);
+ skpair->sk_state_change(skpair);
+ read_lock(&skpair->sk_callback_lock);
sk_wake_async(skpair,1,POLL_HUP);
- read_unlock(&skpair->callback_lock);
+ read_unlock(&skpair->sk_callback_lock);
}
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
@@ -409,8 +410,7 @@ static int unix_release_sock (unix_socket *sk, int embrion)
/* Try to flush out this socket. Throw out buffers at least */
- while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
- {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state==TCP_LISTEN)
unix_release_sock(skb->sk, 1);
/* passed fds are erased in the kfree_skb hook */
@@ -456,16 +456,16 @@ static int unix_listen(struct socket *sock, int backlog)
if (!u->addr)
goto out; /* No listens on an unbound socket */
unix_state_wlock(sk);
- if (sk->state != TCP_CLOSE && sk->state != TCP_LISTEN)
+ if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
- if (backlog > sk->max_ack_backlog)
+ if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
- sk->max_ack_backlog=backlog;
- sk->state=TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
- sk->peercred.pid = current->pid;
- sk->peercred.uid = current->euid;
- sk->peercred.gid = current->egid;
+ sk->sk_peercred.pid = current->pid;
+ sk->sk_peercred.uid = current->euid;
+ sk->sk_peercred.gid = current->egid;
err = 0;
out_unlock:
@@ -495,10 +495,9 @@ static struct sock * unix_create1(struct socket *sock)
sock_init_data(sock,sk);
sk_set_owner(sk, THIS_MODULE);
- sk->write_space = unix_write_space;
-
- sk->max_ack_backlog = sysctl_unix_max_dgram_qlen;
- sk->destruct = unix_sock_destructor;
+ sk->sk_write_space = unix_write_space;
+ sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
+ sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
u->dentry = NULL;
u->mnt = NULL;
@@ -589,7 +588,7 @@ retry:
yield();
goto retry;
}
- addr->hash ^= sk->type;
+ addr->hash ^= sk->sk_type;
__unix_remove_socket(sk);
u->addr = addr;
@@ -623,13 +622,13 @@ static unix_socket *unix_find_other(struct sockaddr_un *sunname, int len,
if (!u)
goto put_fail;
- if (u->type == type)
+ if (u->sk_type == type)
update_atime(nd.dentry->d_inode);
path_release(&nd);
err=-EPROTOTYPE;
- if (u->type != type) {
+ if (u->sk_type != type) {
sock_put(u);
goto fail;
}
@@ -693,7 +692,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
memcpy(addr->name, sunaddr, addr_len);
addr->len = addr_len;
- addr->hash = hash^sk->type;
+ addr->hash = hash ^ sk->sk_type;
atomic_set(&addr->refcnt, 1);
if (sunaddr->sun_path[0]) {
@@ -736,7 +735,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/*
* All right, let's create it.
*/
- mode = S_IFSOCK | (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
+ mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
if (err)
goto out_mknod_dput;
@@ -752,7 +752,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (!sunaddr->sun_path[0]) {
err = -EADDRINUSE;
if (__unix_find_socket_byname(sunaddr, addr_len,
- sk->type, hash)) {
+ sk->sk_type, hash)) {
unix_release_addr(addr);
goto out_unlock;
}
@@ -818,7 +818,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
if (!unix_may_send(sk, other))
goto out_unlock;
- err = security_unix_may_send(sk->socket, other->socket);
+ err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
@@ -863,8 +863,9 @@ static long unix_wait_for_peer(unix_socket *other, long timeo)
prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
sched = !sock_flag(other, SOCK_DEAD) &&
- !(other->shutdown & RCV_SHUTDOWN) &&
- skb_queue_len(&other->receive_queue) > other->max_ack_backlog;
+ !(other->sk_shutdown & RCV_SHUTDOWN) &&
+ (skb_queue_len(&other->sk_receive_queue) >
+ other->sk_max_ack_backlog);
unix_state_runlock(other);
@@ -918,7 +919,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
restart:
/* Find listening sock. */
- other=unix_find_other(sunaddr, addr_len, sk->type, hash, &err);
+ other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
if (!other)
goto out;
@@ -933,10 +934,11 @@ restart:
}
err = -ECONNREFUSED;
- if (other->state != TCP_LISTEN)
+ if (other->sk_state != TCP_LISTEN)
goto out_unlock;
- if (skb_queue_len(&other->receive_queue) > other->max_ack_backlog) {
+ if (skb_queue_len(&other->sk_receive_queue) >
+ other->sk_max_ack_backlog) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
@@ -961,7 +963,7 @@ restart:
Well, and we have to recheck the state after socket locked.
*/
- st = sk->state;
+ st = sk->sk_state;
switch (st) {
case TCP_CLOSE:
@@ -978,14 +980,14 @@ restart:
unix_state_wlock(sk);
- if (sk->state != st) {
+ if (sk->sk_state != st) {
unix_state_wunlock(sk);
unix_state_runlock(other);
sock_put(other);
goto restart;
}
- err = security_unix_stream_connect(sock, other->socket, newsk);
+ err = security_unix_stream_connect(sock, other->sk_socket, newsk);
if (err) {
unix_state_wunlock(sk);
goto out_unlock;
@@ -994,14 +996,14 @@ restart:
/* The way is open! Quickly set all the necessary fields... */
sock_hold(sk);
- unix_peer(newsk)=sk;
- newsk->state=TCP_ESTABLISHED;
- newsk->type=SOCK_STREAM;
- newsk->peercred.pid = current->pid;
- newsk->peercred.uid = current->euid;
- newsk->peercred.gid = current->egid;
+ unix_peer(newsk) = sk;
+ newsk->sk_state = TCP_ESTABLISHED;
+ newsk->sk_type = SOCK_STREAM;
+ newsk->sk_peercred.pid = current->pid;
+ newsk->sk_peercred.uid = current->euid;
+ newsk->sk_peercred.gid = current->egid;
newu = unix_sk(newsk);
- newsk->sleep = &newu->peer_wait;
+ newsk->sk_sleep = &newu->peer_wait;
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1015,24 +1017,24 @@ restart:
}
/* Set credentials */
- sk->peercred = other->peercred;
+ sk->sk_peercred = other->sk_peercred;
sock_hold(newsk);
- unix_peer(sk)=newsk;
- sock->state=SS_CONNECTED;
- sk->state=TCP_ESTABLISHED;
+ unix_peer(sk) = newsk;
+ sock->state = SS_CONNECTED;
+ sk->sk_state = TCP_ESTABLISHED;
unix_state_wunlock(sk);
/* take ten and send info to listening sock */
- spin_lock(&other->receive_queue.lock);
- __skb_queue_tail(&other->receive_queue,skb);
+ spin_lock(&other->sk_receive_queue.lock);
+ __skb_queue_tail(&other->sk_receive_queue, skb);
/* Undo artificially decreased inflight after embrion
* is installed to listening socket. */
atomic_inc(&newu->inflight);
- spin_unlock(&other->receive_queue.lock);
+ spin_unlock(&other->sk_receive_queue.lock);
unix_state_runlock(other);
- other->data_ready(other, 0);
+ other->sk_data_ready(other, 0);
sock_put(other);
return 0;
@@ -1059,16 +1061,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
sock_hold(skb);
unix_peer(ska)=skb;
unix_peer(skb)=ska;
- ska->peercred.pid = skb->peercred.pid = current->pid;
- ska->peercred.uid = skb->peercred.uid = current->euid;
- ska->peercred.gid = skb->peercred.gid = current->egid;
-
- if (ska->type != SOCK_DGRAM)
- {
- ska->state=TCP_ESTABLISHED;
- skb->state=TCP_ESTABLISHED;
- socka->state=SS_CONNECTED;
- sockb->state=SS_CONNECTED;
+ ska->sk_peercred.pid = skb->sk_peercred.pid = current->pid;
+ ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
+ ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
+
+ if (ska->sk_type != SOCK_DGRAM) {
+ ska->sk_state = TCP_ESTABLISHED;
+ skb->sk_state = TCP_ESTABLISHED;
+ socka->state = SS_CONNECTED;
+ sockb->state = SS_CONNECTED;
}
return 0;
}
@@ -1085,7 +1086,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
goto out;
err = -EINVAL;
- if (sk->state!=TCP_LISTEN)
+ if (sk->sk_state != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1232,7 +1233,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
err = -EMSGSIZE;
- if ((unsigned)len > sk->sndbuf - 32)
+ if ((unsigned)len > sk->sk_sndbuf - 32)
goto out;
skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
@@ -1256,7 +1257,8 @@ restart:
if (sunaddr == NULL)
goto out_free;
- other = unix_find_other(sunaddr, namelen, sk->type, hash, &err);
+ other = unix_find_other(sunaddr, namelen, sk->sk_type,
+ hash, &err);
if (other==NULL)
goto out_free;
}
@@ -1294,15 +1296,16 @@ restart:
}
err = -EPIPE;
- if (other->shutdown&RCV_SHUTDOWN)
+ if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
- err = security_unix_may_send(sk->socket, other->socket);
+ err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
if (unix_peer(other) != sk &&
- skb_queue_len(&other->receive_queue) > other->max_ack_backlog) {
+ (skb_queue_len(&other->sk_receive_queue) >
+ other->sk_max_ack_backlog)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
@@ -1317,9 +1320,9 @@ restart:
goto restart;
}
- skb_queue_tail(&other->receive_queue, skb);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_runlock(other);
- other->data_ready(other, len);
+ other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
@@ -1359,7 +1362,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out_err;
if (msg->msg_namelen) {
- err = (sk->state==TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP);
+ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
sunaddr = NULL;
@@ -1369,7 +1372,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out_err;
}
- if (sk->shutdown&SEND_SHUTDOWN)
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
while(sent < len)
@@ -1382,8 +1385,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size=len-sent;
/* Keep two messages in the pipe so it schedules better */
- if (size > sk->sndbuf/2 - 64)
- size = sk->sndbuf/2 - 64;
+ if (size > sk->sk_sndbuf / 2 - 64)
+ size = sk->sk_sndbuf / 2 - 64;
if (size > SKB_MAX_ALLOC)
size = SKB_MAX_ALLOC;
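The sizing above caps each skb near half the send buffer (minus a little slack) so two messages can sit in the pipe at once, and never above SKB_MAX_ALLOC. The same policy as a pure function (stream_chunk() is a hypothetical name):

	static int stream_chunk(int remaining, int sndbuf, int max_alloc)
	{
		int size = remaining;

		if (size > sndbuf / 2 - 64)
			size = sndbuf / 2 - 64;	/* leave room for a 2nd message */
		if (size > max_alloc)
			size = max_alloc;	/* single-allocation ceiling */
		return size;
	}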
@@ -1418,12 +1421,12 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
unix_state_rlock(other);
if (sock_flag(other, SOCK_DEAD) ||
- (other->shutdown & RCV_SHUTDOWN))
+ (other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
- skb_queue_tail(&other->receive_queue, skb);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_runlock(other);
- other->data_ready(other, size);
+ other->sk_data_ready(other, size);
sent+=size;
}
sock_put(other);
@@ -1544,23 +1547,23 @@ static long unix_stream_data_wait(unix_socket * sk, long timeo)
unix_state_rlock(sk);
for (;;) {
- prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- if (skb_queue_len(&sk->receive_queue) ||
- sk->err ||
- (sk->shutdown & RCV_SHUTDOWN) ||
+ if (skb_queue_len(&sk->sk_receive_queue) ||
+ sk->sk_err ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
!timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
unix_state_runlock(sk);
timeo = schedule_timeout(timeo);
unix_state_rlock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
- finish_wait(sk->sleep, &wait);
+ finish_wait(sk->sk_sleep, &wait);
unix_state_runlock(sk);
return timeo;
}
@@ -1583,7 +1586,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
long timeo;
err = -EINVAL;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
err = -EOPNOTSUPP;
@@ -1611,7 +1614,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
int chunk;
struct sk_buff *skb;
- skb=skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb==NULL)
{
if (copied >= target)
@@ -1623,7 +1626,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if ((err = sock_error(sk)) != 0)
break;
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
err = -EAGAIN;
if (!timeo)
@@ -1643,7 +1646,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (check_creds) {
/* Never glue messages from different writers */
if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} else {
@@ -1661,7 +1664,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
break;
@@ -1680,7 +1683,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put the skb back if we didn't use it up.. */
if (skb->len)
{
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
@@ -1697,7 +1700,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
/* put message back and return */
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} while (size);
@@ -1717,14 +1720,14 @@ static int unix_shutdown(struct socket *sock, int mode)
if (mode) {
unix_state_wlock(sk);
- sk->shutdown |= mode;
+ sk->sk_shutdown |= mode;
other=unix_peer(sk);
if (other)
sock_hold(other);
unix_state_wunlock(sk);
- sk->state_change(sk);
+ sk->sk_state_change(sk);
- if (other && sk->type == SOCK_STREAM) {
+ if (other && sk->sk_type == SOCK_STREAM) {
int peer_mode = 0;
if (mode&RCV_SHUTDOWN)
@@ -1732,15 +1735,15 @@ static int unix_shutdown(struct socket *sock, int mode)
if (mode&SEND_SHUTDOWN)
peer_mode |= RCV_SHUTDOWN;
unix_state_wlock(other);
- other->shutdown |= peer_mode;
+ other->sk_shutdown |= peer_mode;
unix_state_wunlock(other);
- other->state_change(other);
- read_lock(&other->callback_lock);
+ other->sk_state_change(other);
+ read_lock(&other->sk_callback_lock);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other,1,POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other,1,POLL_IN);
- read_unlock(&other->callback_lock);
+ read_unlock(&other->sk_callback_lock);
}
if (other)
sock_put(other);
@@ -1757,21 +1760,22 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch(cmd)
{
case SIOCOUTQ:
- amount = atomic_read(&sk->wmem_alloc);
+ amount = atomic_read(&sk->sk_wmem_alloc);
err = put_user(amount, (int *)arg);
break;
case SIOCINQ:
{
struct sk_buff *skb;
- if (sk->state==TCP_LISTEN) {
+ if (sk->sk_state == TCP_LISTEN) {
err = -EINVAL;
break;
}
- spin_lock(&sk->receive_queue.lock);
- if((skb=skb_peek(&sk->receive_queue))!=NULL)
+ spin_lock(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
amount=skb->len;
- spin_unlock(&sk->receive_queue.lock);
+ spin_unlock(&sk->sk_receive_queue.lock);
err = put_user(amount, (int *)arg);
break;
}
@@ -1788,21 +1792,22 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl
struct sock *sk = sock->sk;
unsigned int mask;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
- if (sk->err)
+ if (sk->sk_err)
mask |= POLLERR;
- if (sk->shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->receive_queue) || (sk->shutdown&RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
- if (sk->type == SOCK_STREAM && sk->state==TCP_CLOSE)
+ if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/*
@@ -1837,13 +1842,13 @@ static int unix_read_proc(char *buffer, char **start, off_t offset,
len+=sprintf(buffer+len,"%p: %08X %08X %08X %04X %02X %5lu",
s,
- atomic_read(&s->refcnt),
+ atomic_read(&s->sk_refcnt),
0,
- s->state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
- s->type,
- s->socket ?
- (s->state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
- (s->state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
+ s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
+ s->sk_type,
+ s->sk_socket ?
+ (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
+ (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
if (u->addr) {
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 8dd0d6f2e12b..86c6998d35ae 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -219,8 +219,8 @@ void unix_gc(void)
* negative inflight counter to close race window.
* It is a trick of course, and a dirty one.
*/
- if(s->socket && s->socket->file)
- open_count = file_count(s->socket->file);
+ if (s->sk_socket && s->sk_socket->file)
+ open_count = file_count(s->sk_socket->file);
if (open_count > atomic_read(&unix_sk(s)->inflight))
maybe_unmark_and_push(s);
}
@@ -234,15 +234,14 @@ void unix_gc(void)
unix_socket *x = pop_stack();
unix_socket *sk;
- spin_lock(&x->receive_queue.lock);
- skb=skb_peek(&x->receive_queue);
+ spin_lock(&x->sk_receive_queue.lock);
+ skb = skb_peek(&x->sk_receive_queue);
/*
* Loop through all but the first born
*/
- while(skb && skb != (struct sk_buff *)&x->receive_queue)
- {
+ while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
/*
* Do we have file descriptors?
*/
@@ -266,12 +265,11 @@ void unix_gc(void)
}
}
/* We have to scan not-yet-accepted ones too */
- if (x->state == TCP_LISTEN) {
+ if (x->sk_state == TCP_LISTEN)
maybe_unmark_and_push(skb->sk);
- }
skb=skb->next;
}
- spin_unlock(&x->receive_queue.lock);
+ spin_unlock(&x->sk_receive_queue.lock);
sock_put(x);
}
@@ -283,10 +281,11 @@ void unix_gc(void)
if (u->gc_tree == GC_ORPHAN) {
struct sk_buff *nextsk;
- spin_lock(&s->receive_queue.lock);
- skb=skb_peek(&s->receive_queue);
- while(skb && skb != (struct sk_buff *)&s->receive_queue)
- {
+
+ spin_lock(&s->sk_receive_queue.lock);
+ skb = skb_peek(&s->sk_receive_queue);
+ while (skb &&
+ skb != (struct sk_buff *)&s->sk_receive_queue) {
nextsk=skb->next;
/*
* Do we have file descriptors?
@@ -298,7 +297,7 @@ void unix_gc(void)
}
skb=nextsk;
}
- spin_unlock(&s->receive_queue.lock);
+ spin_unlock(&s->sk_receive_queue.lock);
}
u->gc_tree = GC_ORPHAN;
}
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index 191ea011b9ec..56d8eec95fcb 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -84,12 +84,12 @@
* When the user sends a packet via send() system call
* the wanpipe_sendmsg() function is executed.
*
- * Each packet is enqueued into sk->write_queue transmit
+ * Each packet is enqueued into sk->sk_write_queue transmit
* queue. When the packet is enqueued, a delayed transmit
* timer is triggered, which acts as a Bottom Half handler.
*
* The wanpipe_delay_transmit() function (BH) dequeues packets
- * from the sk->write_queue transmit queue and sends them
+ * from the sk->sk_write_queue transmit queue and sends them
* to the driver via the dev->hard_start_xmit(skb, dev) function.
* Note, this function is actually a function pointer to the if_send()
* routine in the wanpipe driver.
@@ -99,7 +99,7 @@
* In order to provide 100% guaranteed packet delivery,
 * an atomic 'packet_sent' counter is implemented. The counter
 * is incremented for each packet enqueued
- * into sk->write_queue. Counter is decremented each
+ * into sk->sk_write_queue. The counter is decremented each
 * time the wanpipe_delayed_transmit() function successfully
 * passes the packet to the driver. Before each send(), a poll
 * routine checks the sock resources. The maximum value of
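
The send path described above reduces to "enqueue, then arm the BH
timer". A condensed sketch under those assumptions; the helper name is
hypothetical and the mod_timer() arming is a guess at code elided from
this hunk, while wp->timer bit 0 and wp->tx_timer are fields this file
already uses:

static void wanpipe_queue_for_tx(struct sock *sk, struct sk_buff *skb,
				 wanpipe_opt *wp)
{
	skb_queue_tail(&sk->sk_write_queue, skb);	/* pending frames, FIFO */
	atomic_inc(&wp->packet_sent);			/* one more not yet sent */

	/* Arm the delayed-transmit BH once; it reschedules itself
	 * while frames remain queued. */
	if (!test_and_set_bit(0, &wp->timer))
		mod_timer(&wp->tx_timer, jiffies + 1);
}
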
@@ -263,16 +263,16 @@ static int wanpipe_rcv(struct sk_buff *skb, struct net_device *dev,
}
break;
case WAN_PACKET_CMD:
- sk->state = chan->state;
+ sk->sk_state = chan->state;
/* Bug fix: update Mar6.
* Do not set the sock lcn number here, since
* cmd is not guaranteed to be executed on the
* board, thus Lcn could be wrong */
- sk->data_ready(sk,skb->len);
+ sk->sk_data_ready(sk, skb->len);
kfree_skb(skb);
break;
case WAN_PACKET_ERR:
- sk->state = chan->state;
+ sk->sk_state = chan->state;
if (sock_queue_err_skb(sk,skb)<0){
return -ENOMEM;
}
@@ -284,11 +284,11 @@ static int wanpipe_rcv(struct sk_buff *skb, struct net_device *dev,
}
//??????????????????????
-// if (sk->state == WANSOCK_DISCONNECTED){
-// if (sk->zapped){
+// if (sk->sk_state == WANSOCK_DISCONNECTED){
+// if (sk->sk_zapped) {
// //printk(KERN_INFO "wansock: Disconnected, killing early\n");
// wanpipe_unlink_driver(sk);
-// sk->bound_dev_if = 0;
+// sk->sk_bound_dev_if = 0;
// }
// }
@@ -359,7 +359,7 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
/* Initialize the new sock structure
*/
- newsk->bound_dev_if = dev->ifindex;
+ newsk->sk_bound_dev_if = dev->ifindex;
newwp = wp_sk(newsk);
newwp->card = wp->card;
@@ -395,7 +395,7 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
chan->lcn = mbox_ptr->cmd.lcn;
card->u.x.svc_to_dev_map[(chan->lcn%MAX_X25_LCN)] = dev;
- newsk->zapped=0;
+ newsk->sk_zapped = 0;
newwp->num = htons(X25_PROT);
if (wanpipe_do_bind(newsk, dev, newwp->num)) {
@@ -403,7 +403,7 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
release_device(dev);
return -EINVAL;
}
- newsk->state = WANSOCK_CONNECTING;
+ newsk->sk_state = WANSOCK_CONNECTING;
/* Fill in the standard sock address info */
@@ -416,23 +416,23 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
sll->sll_halen = 0;
skb->dev = dev;
- sk->ack_backlog++;
+ sk->sk_ack_backlog++;
/* We must do this manually, since the sock_queue_rcv_skb()
* function sets the skb->dev to NULL. However, we use
* the dev field in the accept function.*/
- if (atomic_read(&sk->rmem_alloc) + skb->truesize >=
- (unsigned)sk->rcvbuf){
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
wanpipe_unlink_driver(newsk);
wanpipe_kill_sock_irq (newsk);
- --sk->ack_backlog;
+ --sk->sk_ack_backlog;
return -ENOMEM;
}
skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->receive_queue, skb);
- sk->data_ready(sk,skb->len);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
return 0;
}
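
The manual queueing above exists only because sock_queue_rcv_skb()
clears skb->dev, which the accept path still needs. Factored into a
helper (hypothetical name, same accounting as the hunk):

static int queue_rcv_keep_dev(struct sock *sk, struct sk_buff *skb)
{
	/* Refuse the frame once it would overrun the receive budget. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_set_owner_r(skb, sk);		/* charge sk_rmem_alloc */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);	/* wake readers */
	return 0;
}
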
@@ -456,22 +456,22 @@ static struct sock *wanpipe_make_new(struct sock *osk)
{
struct sock *sk;
- if (osk->type != SOCK_RAW)
+ if (osk->sk_type != SOCK_RAW)
return NULL;
if ((sk = wanpipe_alloc_socket()) == NULL)
return NULL;
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
wp_sk(sk)->num = wp_sk(osk)->num;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = WANSOCK_CONNECTING;
- sk->sleep = osk->sleep;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = WANSOCK_CONNECTING;
+ sk->sk_sleep = osk->sk_sleep;
return sk;
}
@@ -521,17 +521,17 @@ static struct sock *wanpipe_alloc_socket(void)
*
* This function implements a sendto() system call,
* for AF_WANPIPE socket family.
- * During socket bind() sk->bound_dev_if is initialized
+ * During socket bind() sk->sk_bound_dev_if is initialized
 * to the correct network device. This number is used
 * to find the network device to which the packet
 * should be passed.
*
- * Each packet is queued into sk->write_queue and
+ * Each packet is queued into sk->sk_write_queue and
 * the delayed transmit bottom half handler is marked for
* execution.
*
* A socket must be in WANSOCK_CONNECTED state before
- * a packet is queued into sk->write_queue.
+ * a packet is queued into sk->sk_write_queue.
*===========================================================*/
static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
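
The bind-time contract described above means every data-path lookup is
dev_get_by_index(sk->sk_bound_dev_if), and each successful lookup takes
a device reference that must be balanced by dev_put(). A sketch of the
pattern with a hypothetical helper name:

static struct net_device *bound_dev(struct sock *sk)
{
	struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);

	if (!dev)
		return NULL;		/* never bound, or device gone */
	if (!(dev->flags & IFF_UP)) {
		dev_put(dev);		/* drop the lookup reference */
		return NULL;
	}
	return dev;			/* caller must dev_put() */
}
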
@@ -547,10 +547,10 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
int ifindex, err, reserve = 0;
- if (!sk->zapped)
+ if (!sk->sk_zapped)
return -ENETDOWN;
- if (sk->state != WANSOCK_CONNECTED)
+ if (sk->sk_state != WANSOCK_CONNECTED)
return -ENOTCONN;
if (msg->msg_flags&~MSG_DONTWAIT)
@@ -564,7 +564,7 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
wp = wp_sk(sk);
if (saddr == NULL) {
- ifindex = sk->bound_dev_if;
+ ifindex = sk->sk_bound_dev_if;
proto = wp->num;
addr = NULL;
@@ -573,7 +573,7 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
return -EINVAL;
}
- ifindex = sk->bound_dev_if;
+ ifindex = sk->sk_bound_dev_if;
proto = saddr->sll_protocol;
addr = saddr->sll_addr;
}
@@ -619,19 +619,20 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
skb->protocol = proto;
skb->dev = dev;
- skb->priority = sk->priority;
+ skb->priority = sk->sk_priority;
skb->pkt_type = WAN_PACKET_DATA;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_free;
- if (atomic_read(&sk->wmem_alloc) + skb->truesize > (unsigned int)sk->sndbuf){
+ if (atomic_read(&sk->sk_wmem_alloc) + skb->truesize >
+ (unsigned int)sk->sk_sndbuf){
kfree_skb(skb);
return -ENOBUFS;
}
- skb_queue_tail(&sk->write_queue,skb);
+ skb_queue_tail(&sk->sk_write_queue,skb);
atomic_inc(&wp->packet_sent);
if (!(test_and_set_bit(0, &wp->timer))){
@@ -652,7 +653,7 @@ out_unlock:
 * wanpipe_delayed_transmit
*
* Transmit bottom half handler. It dequeues packets
- * from sk->write_queue and passes them to the
+ * from sk->sk_write_queue and passes them to the
* driver. If the driver is busy, the packet is
* re-enqueued.
*
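
One iteration of that bottom half: dequeue a frame, hand it to the
driver, and on failure push it back at the head so ordering is
preserved. Sketch only, with a hypothetical helper name:

static void tx_bh_once(struct sock *sk, struct net_device *dev,
		       wanpipe_opt *wp)
{
	struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);

	if (!skb)
		return;
	if (dev->hard_start_xmit(skb, dev) != 0) {
		/* Driver busy: requeue at the head, retry later. */
		skb_queue_head(&sk->sk_write_queue, skb);
		return;
	}
	atomic_dec(&wp->packet_sent);	/* delivered to the driver */
}
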
@@ -675,7 +676,7 @@ static void wanpipe_delayed_transmit (unsigned long data)
return;
}
- if (sk->state != WANSOCK_CONNECTED || !sk->zapped){
+ if (sk->sk_state != WANSOCK_CONNECTED || !sk->sk_zapped) {
clear_bit(0, &wp->timer);
DBG_PRINTK(KERN_INFO "wansock: Tx Timer, State not CONNECTED\n");
return;
@@ -701,13 +702,13 @@ static void wanpipe_delayed_transmit (unsigned long data)
}
/* Check for a packet in the fifo and send */
- if ((skb=skb_dequeue(&sk->write_queue)) != NULL){
+ if ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL){
if (dev->hard_start_xmit(skb, dev) != 0){
/* Driver failed to transmit, re-enqueue
* the packet and retry again later */
- skb_queue_head(&sk->write_queue,skb);
+ skb_queue_head(&sk->sk_write_queue,skb);
clear_bit(0,&wanpipe_tx_critical);
return;
}else{
@@ -718,11 +719,11 @@ static void wanpipe_delayed_transmit (unsigned long data)
*/
atomic_dec(&wp->packet_sent);
- if (skb_peek(&sk->write_queue) == NULL){
+ if (skb_peek(&sk->sk_write_queue) == NULL) {
/* If there is nothing to send, kick
* the poll routine, which will trigger
* the application to send more data */
- sk->data_ready(sk,0);
+ sk->sk_data_ready(sk, 0);
clear_bit(0, &wp->timer);
}else{
/* Reschedule as fast as possible */
@@ -763,10 +764,10 @@ static int execute_command(struct sock *sk, unsigned char cmd, unsigned int fla
int err=0;
DECLARE_WAITQUEUE(wait, current);
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (dev == NULL){
printk(KERN_INFO "wansock: Exec failed no dev %i\n",
- sk->bound_dev_if);
+ sk->sk_bound_dev_if);
return -ENODEV;
}
dev_put(dev);
@@ -799,7 +800,7 @@ static int execute_command(struct sock *sk, unsigned char cmd, unsigned int fla
atomic_set(&chan->command, cmd);
}
- add_wait_queue(sk->sleep,&wait);
+ add_wait_queue(sk->sk_sleep,&wait);
current->state = TASK_INTERRUPTIBLE;
for (;;){
if (((mbox_cmd_t*)wp->mbox)->cmd.result != 0x7F) {
@@ -813,7 +814,7 @@ static int execute_command(struct sock *sk, unsigned char cmd, unsigned int fla
schedule();
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep,&wait);
+ remove_wait_queue(sk->sk_sleep,&wait);
return err;
}
@@ -830,27 +831,29 @@ static void wanpipe_destroy_timer(unsigned long data)
struct sock *sk=(struct sock *)data;
wanpipe_opt *wp = wp_sk(sk);
- if ((!atomic_read(&sk->wmem_alloc) && !atomic_read(&sk->rmem_alloc)) ||
+ if ((!atomic_read(&sk->sk_wmem_alloc) &&
+ !atomic_read(&sk->sk_rmem_alloc)) ||
(++wp->force == 5)) {
- if (atomic_read(&sk->wmem_alloc) || atomic_read(&sk->rmem_alloc))
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc))
printk(KERN_INFO "wansock: Warning, Packet Discarded due to sock shutdown!\n");
kfree(wp);
wp_sk(sk) = NULL;
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i ! :delay.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
return;
}
- sk->timer.expires=jiffies+5*HZ;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + 5 * HZ;
+ add_timer(&sk->sk_timer);
printk(KERN_INFO "wansock: packet sk destroy delayed\n");
}
@@ -866,11 +869,11 @@ static void wanpipe_unlink_driver (struct sock *sk)
struct net_device *dev;
wanpipe_common_t *chan=NULL;
- sk->zapped=0;
- sk->state = WANSOCK_DISCONNECTED;
+ sk->sk_zapped = 0;
+ sk->sk_state = WANSOCK_DISCONNECTED;
wp_sk(sk)->dev = NULL;
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev){
printk(KERN_INFO "wansock: No dev on release\n");
return;
@@ -915,7 +918,7 @@ static void wanpipe_link_driver(struct net_device *dev, struct sock *sk)
chan->mbox = wp->mbox;
chan->tx_timer = &wp->tx_timer;
wp->dev = dev;
- sk->zapped = 1;
+ sk->sk_zapped = 1;
clear_bit(0,&chan->common_critical);
}
@@ -966,23 +969,23 @@ static int wanpipe_release(struct socket *sock)
*/
if (wp->num == htons(X25_PROT) &&
- sk->state != WANSOCK_DISCONNECTED && sk->zapped) {
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ sk->sk_state != WANSOCK_DISCONNECTED && sk->sk_zapped) {
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
wanpipe_common_t *chan;
if (dev){
chan=dev->priv;
atomic_set(&chan->disconnect,1);
DBG_PRINTK(KERN_INFO "wansock: Sending Clear Indication %i\n",
- sk->state);
+ sk->sk_state);
dev_put(dev);
}
}
set_bit(1,&wanpipe_tx_critical);
write_lock(&wanpipe_sklist_lock);
- for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
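
The removal loop above is the classic pointer-to-pointer unlink:
iterating over the address of each link means the list head needs no
special case. Standalone sketch (hypothetical helper name):

static void sklist_remove(struct sock **list, struct sock *sk)
{
	struct sock **skp;

	for (skp = list; *skp; skp = &(*skp)->sk_next) {
		if (*skp == sk) {
			*skp = sk->sk_next;	/* splice out */
			__sock_put(sk);		/* drop the list's reference */
			break;
		}
	}
}
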
@@ -999,35 +1002,37 @@ static int wanpipe_release(struct socket *sock)
* Now the socket is dead. No more input will appear.
*/
- sk->state_change(sk); /* It is useless. Just for sanity. */
+ sk->sk_state_change(sk); /* It is useless. Just for sanity. */
sock->sk = NULL;
- sk->socket = NULL;
+ sk->sk_socket = NULL;
sock_set_flag(sk, SOCK_DEAD);
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
- skb_queue_purge(&sk->error_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&sk->sk_error_queue);
- if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
- del_timer(&sk->timer);
+ if (atomic_read(&sk->sk_rmem_alloc) ||
+ atomic_read(&sk->sk_wmem_alloc)) {
+ del_timer(&sk->sk_timer);
printk(KERN_INFO "wansock: Killing in Timer R %i , W %i\n",
- atomic_read(&sk->rmem_alloc),atomic_read(&sk->wmem_alloc));
- sk->timer.data=(unsigned long)sk;
- sk->timer.expires=jiffies+HZ;
- sk->timer.function=wanpipe_destroy_timer;
- add_timer(&sk->timer);
+ atomic_read(&sk->sk_rmem_alloc),
+ atomic_read(&sk->sk_wmem_alloc));
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = wanpipe_destroy_timer;
+ add_timer(&sk->sk_timer);
return 0;
}
kfree(wp);
wp_sk(sk) = NULL;
- if (atomic_read(&sk->refcnt) != 1){
+ if (atomic_read(&sk->sk_refcnt) != 1) {
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i !:release.\n",
- atomic_read(&sk->refcnt));
- atomic_set(&sk->refcnt,1);
+ atomic_read(&sk->sk_refcnt));
+ atomic_set(&sk->sk_refcnt, 1);
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1046,10 +1051,10 @@ static int wanpipe_release(struct socket *sock)
static void check_write_queue(struct sock *sk)
{
- if (sk->state != WANSOCK_CONNECTED)
+ if (sk->sk_state != WANSOCK_CONNECTED)
return;
- if (!atomic_read(&sk->wmem_alloc))
+ if (!atomic_read(&sk->sk_wmem_alloc))
return;
printk(KERN_INFO "wansock: MAJOR ERROR, Data lost on sock release !!!\n");
@@ -1071,8 +1076,9 @@ static void release_driver(struct sock *sk)
struct sk_buff *skb=NULL;
struct sock *deadsk=NULL;
- if (sk->state == WANSOCK_LISTEN || sk->state == WANSOCK_BIND_LISTEN){
- while ((skb=skb_dequeue(&sk->receive_queue))!=NULL){
+ if (sk->sk_state == WANSOCK_LISTEN ||
+ sk->sk_state == WANSOCK_BIND_LISTEN) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if ((deadsk = get_newsk_from_skb(skb))){
DBG_PRINTK (KERN_INFO "wansock: RELEASE: FOUND DEAD SOCK\n");
sock_set_flag(deadsk, SOCK_DEAD);
@@ -1080,15 +1086,15 @@ static void release_driver(struct sock *sk)
}
kfree_skb(skb);
}
- if (sk->zapped)
+ if (sk->sk_zapped)
wanpipe_unlink_card(sk);
}else{
- if (sk->zapped)
+ if (sk->sk_zapped)
wanpipe_unlink_driver(sk);
}
- sk->state = WANSOCK_DISCONNECTED;
- sk->bound_dev_if = 0;
- sk->zapped=0;
+ sk->sk_state = WANSOCK_DISCONNECTED;
+ sk->sk_bound_dev_if = 0;
+ sk->sk_zapped = 0;
wp = wp_sk(sk);
if (wp && wp->mbox) {
@@ -1108,11 +1114,11 @@ static void release_driver(struct sock *sk)
static void start_cleanup_timer (struct sock *sk)
{
- del_timer(&sk->timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.expires = jiffies + HZ;
- sk->timer.function = wanpipe_kill_sock_timer;
- add_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = wanpipe_kill_sock_timer;
+ add_timer(&sk->sk_timer);
}
@@ -1137,15 +1143,15 @@ static void wanpipe_kill_sock_timer (unsigned long data)
* appropriate locks */
if (test_bit(1,&wanpipe_tx_critical)){
- sk->timer.expires=jiffies+10;
- add_timer(&sk->timer);
+ sk->sk_timer.expires = jiffies + 10;
+ add_timer(&sk->sk_timer);
return;
}
write_lock(&wanpipe_sklist_lock);
- for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
@@ -1154,8 +1160,8 @@ static void wanpipe_kill_sock_timer (unsigned long data)
if (wp_sk(sk)->num == htons(X25_PROT) &&
- sk->state != WANSOCK_DISCONNECTED){
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ sk->sk_state != WANSOCK_DISCONNECTED) {
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
wanpipe_common_t *chan;
if (dev){
chan=dev->priv;
@@ -1166,20 +1172,21 @@ static void wanpipe_kill_sock_timer (unsigned long data)
release_driver(sk);
- sk->socket = NULL;
+ sk->sk_socket = NULL;
/* Purge queues */
- skb_queue_purge(&sk->receive_queue);
- skb_queue_purge(&sk->write_queue);
- skb_queue_purge(&sk->error_queue);
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&sk->sk_error_queue);
- if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
- del_timer(&sk->timer);
+ if (atomic_read(&sk->sk_rmem_alloc) ||
+ atomic_read(&sk->sk_wmem_alloc)) {
+ del_timer(&sk->sk_timer);
printk(KERN_INFO "wansock: Killing SOCK in Timer\n");
- sk->timer.data=(unsigned long)sk;
- sk->timer.expires=jiffies+HZ;
- sk->timer.function=wanpipe_destroy_timer;
- add_timer(&sk->timer);
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.expires = jiffies + HZ;
+ sk->sk_timer.function = wanpipe_destroy_timer;
+ add_timer(&sk->sk_timer);
return;
}
@@ -1188,10 +1195,10 @@ static void wanpipe_kill_sock_timer (unsigned long data)
wp_sk(sk) = NULL;
}
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i ! :timer.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1210,16 +1217,16 @@ static void wanpipe_kill_sock_accept (struct sock *sk)
* appropriate locks */
write_lock(&wanpipe_sklist_lock);
- for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->next) {
+ for (skp = &wanpipe_sklist; *skp; skp = &(*skp)->sk_next) {
if (*skp == sk) {
- *skp = sk->next;
+ *skp = sk->sk_next;
__sock_put(sk);
break;
}
}
write_unlock(&wanpipe_sklist_lock);
- sk->socket = NULL;
+ sk->sk_socket = NULL;
if (wp_sk(sk)) {
@@ -1227,10 +1234,10 @@ static void wanpipe_kill_sock_accept (struct sock *sk)
wp_sk(sk) = NULL;
}
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i ! :timer.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1244,17 +1251,17 @@ static void wanpipe_kill_sock_irq (struct sock *sk)
if (!sk)
return;
- sk->socket = NULL;
+ sk->sk_socket = NULL;
if (wp_sk(sk)) {
kfree(wp_sk(sk));
wp_sk(sk) = NULL;
}
- if (atomic_read(&sk->refcnt) != 1){
- atomic_set(&sk->refcnt,1);
+ if (atomic_read(&sk->sk_refcnt) != 1) {
+ atomic_set(&sk->sk_refcnt, 1);
DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i !:listen.\n",
- atomic_read(&sk->refcnt));
+ atomic_read(&sk->sk_refcnt));
}
sock_put(sk);
atomic_dec(&wanpipe_socks_nr);
@@ -1277,7 +1284,7 @@ static int wanpipe_do_bind(struct sock *sk, struct net_device *dev,
wanpipe_common_t *chan=NULL;
int err=0;
- if (sk->zapped){
+ if (sk->sk_zapped) {
err = -EALREADY;
goto bind_unlock_exit;
}
@@ -1293,29 +1300,29 @@ static int wanpipe_do_bind(struct sock *sk, struct net_device *dev,
if (dev) {
if (dev->flags&IFF_UP) {
chan=dev->priv;
- sk->state = chan->state;
+ sk->sk_state = chan->state;
if (wp->num == htons(X25_PROT) &&
- sk->state != WANSOCK_DISCONNECTED &&
- sk->state != WANSOCK_CONNECTING){
+ sk->sk_state != WANSOCK_DISCONNECTED &&
+ sk->sk_state != WANSOCK_CONNECTING) {
DBG_PRINTK(KERN_INFO
"wansock: Binding to Device not DISCONNECTED %i\n",
- sk->state);
+ sk->sk_state);
release_device(dev);
err = -EAGAIN;
goto bind_unlock_exit;
}
wanpipe_link_driver(dev,sk);
- sk->bound_dev_if = dev->ifindex;
+ sk->sk_bound_dev_if = dev->ifindex;
/* X25 Specific option */
if (wp->num == htons(X25_PROT))
wp_sk(sk)->svc = chan->svc;
} else {
- sk->err = ENETDOWN;
- sk->error_report(sk);
+ sk->sk_err = ENETDOWN;
+ sk->sk_error_report(sk);
release_device(dev);
err = -EINVAL;
}
@@ -1386,7 +1393,7 @@ static int wanpipe_bind(struct socket *sock, struct sockaddr *uaddr, int addr_le
if (sll->sll_protocol)
wp->num = sll->sll_protocol;
- sk->state = WANSOCK_BIND_LISTEN;
+ sk->sk_state = WANSOCK_BIND_LISTEN;
return 0;
}else if (!strcmp(sll->sll_device,"svc_connect")){
@@ -1527,16 +1534,16 @@ static int wanpipe_create(struct socket *sock, int protocol)
if ((sk = wanpipe_alloc_socket()) == NULL)
return -ENOBUFS;
- sk->reuse = 1;
+ sk->sk_reuse = 1;
sock->ops = &wanpipe_ops;
sock_init_data(sock,sk);
- sk->zapped=0;
- sk->family = PF_WANPIPE;
- wp_sk(sk)->num = protocol;
- sk->state = WANSOCK_DISCONNECTED;
- sk->ack_backlog = 0;
- sk->bound_dev_if=0;
+ sk->sk_zapped = 0;
+ sk->sk_family = PF_WANPIPE;
+ wp_sk(sk)->num = protocol;
+ sk->sk_state = WANSOCK_DISCONNECTED;
+ sk->sk_ack_backlog = 0;
+ sk->sk_bound_dev_if = 0;
atomic_inc(&wanpipe_socks_nr);
@@ -1544,7 +1551,7 @@ static int wanpipe_create(struct socket *sock, int protocol)
* can also change the list */
set_bit(1,&wanpipe_tx_critical);
write_lock(&wanpipe_sklist_lock);
- sk->next = wanpipe_sklist;
+ sk->sk_next = wanpipe_sklist;
wanpipe_sklist = sk;
sock_hold(sk);
write_unlock(&wanpipe_sklist_lock);
@@ -1586,7 +1593,7 @@ static int wanpipe_recvmsg(struct kiocb *iocb, struct socket *sock,
*/
if (flags & MSG_OOB){
- skb=skb_dequeue(&sk->error_queue);
+ skb = skb_dequeue(&sk->sk_error_queue);
}else{
skb=skb_recv_datagram(sk,flags,1,&err);
}
@@ -1653,7 +1660,7 @@ static void wanpipe_wakeup_driver(struct sock *sk)
struct net_device *dev = NULL;
wanpipe_common_t *chan=NULL;
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev)
return;
@@ -1663,7 +1670,8 @@ static void wanpipe_wakeup_driver(struct sock *sk)
return;
if (atomic_read(&chan->receive_block)){
- if (atomic_read(&sk->rmem_alloc) < ((unsigned)sk->rcvbuf*0.9) ){
+ if (atomic_read(&sk->sk_rmem_alloc) <
+ ((unsigned)sk->sk_rcvbuf * 0.9)) {
printk(KERN_INFO "wansock: Queuing task for wanpipe\n");
atomic_set(&chan->receive_block,0);
wanpipe_queue_tq(&chan->wanpipe_task);
@@ -1689,9 +1697,9 @@ static int wanpipe_getname(struct socket *sock, struct sockaddr *uaddr,
struct wan_sockaddr_ll *sll = (struct wan_sockaddr_ll*)uaddr;
sll->sll_family = AF_WANPIPE;
- sll->sll_ifindex = sk->bound_dev_if;
+ sll->sll_ifindex = sk->sk_bound_dev_if;
sll->sll_protocol = wp_sk(sk)->num;
- dev = dev_get_by_index(sk->bound_dev_if);
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
@@ -1725,7 +1733,7 @@ static int wanpipe_notifier(struct notifier_block *this, unsigned long msg, void
struct net_device *dev = (struct net_device *)data;
struct wanpipe_opt *po;
- for (sk = wanpipe_sklist; sk; sk = sk->next) {
+ for (sk = wanpipe_sklist; sk; sk = sk->sk_next) {
if ((po = wp_sk(sk)) == NULL)
continue;
@@ -1735,25 +1743,25 @@ static int wanpipe_notifier(struct notifier_block *this, unsigned long msg, void
switch (msg) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
- if (dev->ifindex == sk->bound_dev_if) {
+ if (dev->ifindex == sk->sk_bound_dev_if) {
printk(KERN_INFO "wansock: Device down %s\n",dev->name);
- if (sk->zapped){
+ if (sk->sk_zapped) {
wanpipe_unlink_driver(sk);
- sk->err = ENETDOWN;
- sk->error_report(sk);
+ sk->sk_err = ENETDOWN;
+ sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
printk(KERN_INFO "wansock: Unregistering Device: %s\n",
dev->name);
wanpipe_unlink_driver(sk);
- sk->bound_dev_if = 0;
+ sk->sk_bound_dev_if = 0;
}
}
break;
case NETDEV_UP:
- if (dev->ifindex == sk->bound_dev_if &&
- po->num && !sk->zapped) {
+ if (dev->ifindex == sk->sk_bound_dev_if &&
+ po->num && !sk->sk_zapped) {
printk(KERN_INFO "wansock: Registering Device: %s\n",
dev->name);
wanpipe_link_driver(dev,sk);
@@ -1781,20 +1789,21 @@ static int wanpipe_ioctl(struct socket *sock, unsigned int cmd, unsigned long ar
switch(cmd)
{
case SIOCGSTAMP:
- if(sk->stamp.tv_sec==0)
+ if (!sk->sk_stamp.tv_sec)
return -ENOENT;
err = -EFAULT;
- if (!copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)))
+ if (!copy_to_user((void *)arg, &sk->sk_stamp,
+ sizeof(struct timeval)))
err = 0;
return err;
case SIOC_WANPIPE_CHECK_TX:
- return atomic_read(&sk->wmem_alloc);
+ return atomic_read(&sk->sk_wmem_alloc);
case SIOC_WANPIPE_SOCK_STATE:
- if (sk->state == WANSOCK_CONNECTED)
+ if (sk->sk_state == WANSOCK_CONNECTED)
return 0;
return 1;
@@ -1825,7 +1834,7 @@ static int wanpipe_ioctl(struct socket *sock, unsigned int cmd, unsigned long ar
case SIOC_WANPIPE_SET_NONBLOCK:
- if (sk->state != WANSOCK_DISCONNECTED)
+ if (sk->sk_state != WANSOCK_DISCONNECTED)
return -EINVAL;
sock->file->f_flags |= O_NONBLOCK;
@@ -1876,7 +1885,7 @@ static int wanpipe_debug (struct sock *origsk, void *arg)
int cnt=0, err=0;
wan_debug_t *dbg_data = (wan_debug_t *)arg;
- for (sk = wanpipe_sklist; sk; sk = sk->next){
+ for (sk = wanpipe_sklist; sk; sk = sk->sk_next) {
wanpipe_opt *wp = wp_sk(sk);
if (sk == origsk){
@@ -1885,25 +1894,31 @@ static int wanpipe_debug (struct sock *origsk, void *arg)
if ((err=put_user(1, &dbg_data->debug[cnt].free)))
return err;
- if ((err=put_user(sk->state, &dbg_data->debug[cnt].sk_state)))
+ if ((err = put_user(sk->sk_state,
+ &dbg_data->debug[cnt].state_sk)))
return err;
- if ((err=put_user(sk->rcvbuf, &dbg_data->debug[cnt].rcvbuf)))
+ if ((err = put_user(sk->sk_rcvbuf,
+ &dbg_data->debug[cnt].rcvbuf)))
return err;
- if ((err=put_user(atomic_read(&sk->rmem_alloc), &dbg_data->debug[cnt].rmem)))
+ if ((err = put_user(atomic_read(&sk->sk_rmem_alloc),
+ &dbg_data->debug[cnt].rmem)))
return err;
- if ((err=put_user(atomic_read(&sk->wmem_alloc), &dbg_data->debug[cnt].wmem)))
+ if ((err = put_user(atomic_read(&sk->sk_wmem_alloc),
+ &dbg_data->debug[cnt].wmem)))
return err;
- if ((err=put_user(sk->sndbuf, &dbg_data->debug[cnt].sndbuf)))
+ if ((err = put_user(sk->sk_sndbuf,
+ &dbg_data->debug[cnt].sndbuf)))
return err;
if ((err=put_user(sk_count, &dbg_data->debug[cnt].sk_count)))
return err;
if ((err=put_user(wp->poll_cnt, &dbg_data->debug[cnt].poll_cnt)))
return err;
- if ((err=put_user(sk->bound_dev_if, &dbg_data->debug[cnt].bound)))
+ if ((err = put_user(sk->sk_bound_dev_if,
+ &dbg_data->debug[cnt].bound)))
return err;
- if (sk->bound_dev_if){
- dev = dev_get_by_index(sk->bound_dev_if);
+ if (sk->sk_bound_dev_if) {
+ dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev)
continue;
@@ -2014,7 +2029,7 @@ static int set_ioctl_cmd (struct sock *sk, void *arg)
if (!wp_sk(sk)->mbox) {
void *mbox_ptr;
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
if (!dev)
return -ENODEV;
@@ -2078,28 +2093,28 @@ unsigned int wanpipe_poll(struct file * file, struct socket *sock, poll_table *w
++wp_sk(sk)->poll_cnt;
- poll_wait(file, sk->sleep, wait);
+ poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
- if (sk->err || !skb_queue_empty(&sk->error_queue)){
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) {
mask |= POLLPRI;
return mask;
}
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->receive_queue)){
+ if (!skb_queue_empty(&sk->sk_receive_queue)) {
mask |= POLLIN | POLLRDNORM;
}
/* connection hasn't started yet */
- if (sk->state == WANSOCK_CONNECTING){
+ if (sk->sk_state == WANSOCK_CONNECTING) {
return mask;
}
- if (sk->state == WANSOCK_DISCONNECTED){
+ if (sk->sk_state == WANSOCK_DISCONNECTED) {
mask = POLLPRI;
return mask;
}
@@ -2120,7 +2135,7 @@ unsigned int wanpipe_poll(struct file * file, struct socket *sock, poll_table *w
if (sock_writeable(sk)){
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
}else{
- set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
}
return mask;
@@ -2142,10 +2157,10 @@ static int wanpipe_listen(struct socket *sock, int backlog)
if (wp_sk(sk)->num != htons(X25_PROT))
return -EINVAL;
- if (sk->state == WANSOCK_BIND_LISTEN) {
+ if (sk->sk_state == WANSOCK_BIND_LISTEN) {
- sk->max_ack_backlog = backlog;
- sk->state = WANSOCK_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = WANSOCK_LISTEN;
return 0;
}else{
printk(KERN_INFO "wansock: Listening sock was not binded\n");
@@ -2174,7 +2189,7 @@ static int wanpipe_link_card (struct sock *sk)
card->sk=sk;
card->func=wanpipe_listen_rcv;
- sk->zapped=1;
+ sk->sk_zapped = 1;
return 0;
}
@@ -2225,7 +2240,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
case SIOC_WANPIPE_ACCEPT_CALL:
- if (sk->state != WANSOCK_CONNECTING){
+ if (sk->sk_state != WANSOCK_CONNECTING) {
err = -EHOSTDOWN;
break;
}
@@ -2238,7 +2253,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
* Do not set the sock lcn number here, since
* it is done in wanpipe_listen_rcv().
*/
- if (sk->state == WANSOCK_CONNECTED){
+ if (sk->sk_state == WANSOCK_CONNECTED) {
wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
DBG_PRINTK(KERN_INFO "\nwansock: Accept OK %i\n",
wp->lcn);
@@ -2254,7 +2269,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
case SIOC_WANPIPE_CLEAR_CALL:
- if (sk->state == WANSOCK_DISCONNECTED){
+ if (sk->sk_state == WANSOCK_DISCONNECTED) {
err = -EINVAL;
break;
}
@@ -2264,7 +2279,8 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
 * if so, check whether the user wants to wait until data
* is transmitted, or clear a call and drop packets */
- if (atomic_read(&sk->wmem_alloc) || check_driver_busy(sk)){
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ check_driver_busy(sk)) {
mbox_cmd_t *mbox = wp->mbox;
if (mbox->cmd.qdm & 0x80){
mbox->cmd.result = 0x35;
@@ -2273,14 +2289,14 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
}
}
- sk->state = WANSOCK_DISCONNECTING;
+ sk->sk_state = WANSOCK_DISCONNECTING;
err = execute_command(sk,X25_CLEAR_CALL,0);
if (err < 0)
break;
err = -ECONNREFUSED;
- if (sk->state == WANSOCK_DISCONNECTED){
+ if (sk->sk_state == WANSOCK_DISCONNECTED) {
DBG_PRINTK(KERN_INFO "\nwansock: CLEAR OK %i\n",
wp->lcn);
wp->lcn = 0;
@@ -2290,7 +2306,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
case SIOC_WANPIPE_RESET_CALL:
- if (sk->state != WANSOCK_CONNECTED){
+ if (sk->sk_state != WANSOCK_CONNECTED) {
err = -EINVAL;
break;
}
@@ -2300,7 +2316,8 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
 * if so, check whether the user wants to wait until data
* is transmitted, or reset a call and drop packets */
- if (atomic_read(&sk->wmem_alloc) || check_driver_busy(sk)){
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ check_driver_busy(sk)) {
mbox_cmd_t *mbox = wp->mbox;
if (mbox->cmd.qdm & 0x80){
mbox->cmd.result = 0x35;
@@ -2324,7 +2341,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
if (err < 0)
break;
- if (sk->state == WANSOCK_CONNECTED){
+ if (sk->sk_state == WANSOCK_CONNECTED) {
wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
@@ -2332,7 +2349,8 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
wp->lcn);
err = 0;
- }else if (sk->state == WANSOCK_CONNECTING && (flags & O_NONBLOCK)){
+ } else if (sk->sk_state == WANSOCK_CONNECTING &&
+ (flags & O_NONBLOCK)) {
wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
DBG_PRINTK(KERN_INFO "\nwansock: Place Call OK: Waiting %i\n",
wp->lcn);
@@ -2355,7 +2373,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
static int check_driver_busy (struct sock *sk)
{
- struct net_device *dev = dev_get_by_index(sk->bound_dev_if);
+ struct net_device *dev = dev_get_by_index(sk->sk_bound_dev_if);
wanpipe_common_t *chan;
if (!dev)
@@ -2394,19 +2412,19 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
if ((sk = sock->sk) == NULL)
return -EINVAL;
- if (sk->type != SOCK_RAW)
+ if (sk->sk_type != SOCK_RAW)
return -EOPNOTSUPP;
- if (sk->state != WANSOCK_LISTEN)
+ if (sk->sk_state != WANSOCK_LISTEN)
return -EINVAL;
if (wp_sk(sk)->num != htons(X25_PROT))
return -EINVAL;
- add_wait_queue(sk->sleep,&wait);
+ add_wait_queue(sk->sk_sleep,&wait);
current->state = TASK_INTERRUPTIBLE;
for (;;){
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
if (skb){
err=0;
break;
@@ -2418,7 +2436,7 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
schedule();
}
current->state = TASK_RUNNING;
- remove_wait_queue(sk->sleep,&wait);
+ remove_wait_queue(sk->sk_sleep,&wait);
if (err != 0)
return err;
@@ -2430,18 +2448,18 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
set_bit(1,&wanpipe_tx_critical);
write_lock(&wanpipe_sklist_lock);
- newsk->next = wanpipe_sklist;
+ newsk->sk_next = wanpipe_sklist;
wanpipe_sklist = newsk;
sock_hold(sk);
write_unlock(&wanpipe_sklist_lock);
clear_bit(1,&wanpipe_tx_critical);
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
kfree_skb(skb);
@@ -2496,16 +2514,16 @@ static int wanpipe_connect(struct socket *sock, struct sockaddr *uaddr, int addr
if (wp_sk(sk)->num != htons(X25_PROT))
return -EINVAL;
- if (sk->state == WANSOCK_CONNECTED)
+ if (sk->sk_state == WANSOCK_CONNECTED)
return -EISCONN; /* No reconnect on a seqpacket socket */
- if (sk->state != WAN_DISCONNECTED){
+ if (sk->sk_state != WAN_DISCONNECTED) {
printk(KERN_INFO "wansock: Trying to connect on channel NON DISCONNECT\n");
return -ECONNREFUSED;
}
- sk->state = WANSOCK_DISCONNECTED;
- sock->state = SS_UNCONNECTED;
+ sk->sk_state = WANSOCK_DISCONNECTED;
+ sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct wan_sockaddr_ll))
return -EINVAL;
@@ -2513,16 +2531,16 @@ static int wanpipe_connect(struct socket *sock, struct sockaddr *uaddr, int addr
if (addr->sll_family != AF_WANPIPE)
return -EINVAL;
- if ((dev = dev_get_by_index(sk->bound_dev_if)) == NULL)
+ if ((dev = dev_get_by_index(sk->sk_bound_dev_if)) == NULL)
return -ENETUNREACH;
dev_put(dev);
- if (!sk->zapped) /* Must bind first - autobinding does not work */
+ if (!sk->sk_zapped) /* Must bind first - autobinding does not work */
return -EINVAL;
sock->state = SS_CONNECTING;
- sk->state = WANSOCK_CONNECTING;
+ sk->sk_state = WANSOCK_CONNECTING;
if (!wp_sk(sk)->mbox) {
if (wp_sk (sk)->svc)
@@ -2536,15 +2554,15 @@ static int wanpipe_connect(struct socket *sock, struct sockaddr *uaddr, int addr
if ((err=wanpipe_exec_cmd(sk, X25_PLACE_CALL,flags)) != 0){
sock->state = SS_UNCONNECTED;
- sk->state = WANSOCK_CONNECTED;
+ sk->sk_state = WANSOCK_CONNECTED;
return err;
}
- if (sk->state != WANSOCK_CONNECTED && (flags & O_NONBLOCK)){
+ if (sk->sk_state != WANSOCK_CONNECTED && (flags & O_NONBLOCK)) {
return 0;
}
- if (sk->state != WANSOCK_CONNECTED) {
+ if (sk->sk_state != WANSOCK_CONNECTED) {
sock->state = SS_UNCONNECTED;
return -ECONNREFUSED;
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 4aeec135da9c..5b4ff29fec7c 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -158,15 +158,15 @@ static void x25_remove_socket(struct sock *sk)
write_lock_bh(&x25_list_lock);
if ((s = x25_list) == sk)
- x25_list = s->next;
- else while (s && s->next) {
- if (s->next == sk) {
- s->next = sk->next;
+ x25_list = s->sk_next;
+ else while (s && s->sk_next) {
+ if (s->sk_next == sk) {
+ s->sk_next = sk->sk_next;
sock_put(sk);
break;
}
- s = s->next;
+ s = s->sk_next;
}
write_unlock_bh(&x25_list_lock);
@@ -181,7 +181,7 @@ static void x25_kill_by_device(struct net_device *dev)
write_lock_bh(&x25_list_lock);
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
x25_disconnect(s, ENETUNREACH, 0, 0);
@@ -230,7 +230,7 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
static void x25_insert_socket(struct sock *sk)
{
write_lock_bh(&x25_list_lock);
- sk->next = x25_list;
+ sk->sk_next = x25_list;
x25_list = sk;
sock_hold(sk);
write_unlock_bh(&x25_list_lock);
@@ -246,12 +246,12 @@ static struct sock *x25_find_listener(struct x25_address *addr)
read_lock_bh(&x25_list_lock);
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if ((!strcmp(addr->x25_addr,
x25_sk(s)->source_addr.x25_addr) ||
!strcmp(addr->x25_addr,
null_x25_address.x25_addr)) &&
- s->state == TCP_LISTEN)
+ s->sk_state == TCP_LISTEN)
break;
if (s)
@@ -267,7 +267,7 @@ struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
struct sock *s;
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb)
break;
if (s)
@@ -339,7 +339,7 @@ void x25_destroy_socket(struct sock *sk)
x25_remove_socket(sk);
x25_clear_queues(sk); /* Flush the queues */
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/*
* Queue the unaccepted socket for death
@@ -352,13 +352,14 @@ void x25_destroy_socket(struct sock *sk)
kfree_skb(skb);
}
- if (atomic_read(&sk->wmem_alloc) || atomic_read(&sk->rmem_alloc)) {
+ if (atomic_read(&sk->sk_wmem_alloc) ||
+ atomic_read(&sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
- init_timer(&sk->timer);
- sk->timer.expires = jiffies + 10 * HZ;
- sk->timer.function = x25_destroy_timer;
- sk->timer.data = (unsigned long)sk;
- add_timer(&sk->timer);
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ sk->sk_timer.function = x25_destroy_timer;
+ sk->sk_timer.data = (unsigned long)sk;
+ add_timer(&sk->sk_timer);
} else
sk_free(sk);
release_sock(sk);
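
x25_destroy_socket() frees the sock only once nothing is charged to
it; otherwise destruction is retried from sk_timer rather than freeing
under the accounting. The deferral, factored into a hypothetical
helper:

static void defer_destroy(struct sock *sk, void (*expiry)(unsigned long))
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.expires  = jiffies + 10 * HZ;	/* retry in 10 s */
	sk->sk_timer.function = expiry;		/* e.g. x25_destroy_timer */
	sk->sk_timer.data     = (unsigned long)sk;
	add_timer(&sk->sk_timer);
}
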
@@ -428,10 +429,10 @@ static int x25_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int rc = -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN) {
+ if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
- sk->max_ack_backlog = backlog;
- sk->state = TCP_LISTEN;
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
rc = 0;
}
@@ -488,8 +489,8 @@ static int x25_create(struct socket *sock, int protocol)
init_timer(&x25->timer);
sock->ops = &x25_proto_ops;
- sk->protocol = protocol;
- sk->backlog_rcv = x25_backlog_rcv;
+ sk->sk_protocol = protocol;
+ sk->sk_backlog_rcv = x25_backlog_rcv;
x25->t21 = sysctl_x25_call_request_timeout;
x25->t22 = sysctl_x25_reset_request_timeout;
@@ -513,7 +514,7 @@ static struct sock *x25_make_new(struct sock *osk)
struct sock *sk = NULL;
struct x25_opt *x25, *ox25;
- if (osk->type != SOCK_SEQPACKET)
+ if (osk->sk_type != SOCK_SEQPACKET)
goto out;
if ((sk = x25_alloc_socket()) == NULL)
@@ -521,17 +522,17 @@ static struct sock *x25_make_new(struct sock *osk)
x25 = x25_sk(sk);
- sk->type = osk->type;
- sk->socket = osk->socket;
- sk->priority = osk->priority;
- sk->protocol = osk->protocol;
- sk->rcvbuf = osk->rcvbuf;
- sk->sndbuf = osk->sndbuf;
- sk->debug = osk->debug;
- sk->state = TCP_ESTABLISHED;
- sk->sleep = osk->sleep;
- sk->zapped = osk->zapped;
- sk->backlog_rcv = osk->backlog_rcv;
+ sk->sk_type = osk->sk_type;
+ sk->sk_socket = osk->sk_socket;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_debug = osk->sk_debug;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_sleep = osk->sk_sleep;
+ sk->sk_zapped = osk->sk_zapped;
+ sk->sk_backlog_rcv = osk->sk_backlog_rcv;
ox25 = x25_sk(osk);
x25->t21 = ox25->t21;
@@ -571,16 +572,16 @@ static int x25_release(struct socket *sock)
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25_start_t23timer(sk);
x25->state = X25_STATE_2;
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
}
- sock->sk = NULL;
- sk->socket = NULL; /* Not used, but we should do this */
+ sock->sk = NULL;
+ sk->sk_socket = NULL; /* Not used, but we should do this */
out:
return 0;
}
@@ -590,14 +591,14 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sock *sk = sock->sk;
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
- if (!sk->zapped ||
+ if (!sk->sk_zapped ||
addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25)
return -EINVAL;
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
- sk->zapped = 0;
+ sk->sk_zapped = 0;
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
return 0;
@@ -608,7 +609,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
DECLARE_WAITQUEUE(wait, current);
int rc;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ERESTARTSYS;
@@ -616,11 +617,11 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
break;
rc = sock_error(sk);
if (rc) {
- sk->socket->state = SS_UNCONNECTED;
+ sk->sk_socket->state = SS_UNCONNECTED;
break;
}
rc = 0;
- if (sk->state != TCP_ESTABLISHED) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
schedule();
lock_sock(sk);
@@ -628,7 +629,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -642,22 +643,22 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
int rc = 0;
lock_sock(sk);
- if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
goto out; /* Connect completed during an ERESTARTSYS event */
}
rc = -ECONNREFUSED;
- if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
goto out;
}
rc = -EISCONN; /* No reconnect on a seqpacket socket */
- if (sk->state == TCP_ESTABLISHED)
+ if (sk->sk_state == TCP_ESTABLISHED)
goto out;
- sk->state = TCP_CLOSE;
+ sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rc = -EINVAL;
@@ -681,7 +682,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
goto out_put_neigh;
rc = -EINVAL;
- if (sk->zapped) /* Must bind first - autobinding does not work */
+ if (sk->sk_zapped) /* Must bind first - autobinding does not work */
goto out_put_neigh;
if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
@@ -691,7 +692,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
- sk->state = TCP_SYN_SENT;
+ sk->sk_state = TCP_SYN_SENT;
x25->state = X25_STATE_1;
@@ -702,7 +703,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
/* Now the loop */
rc = -EINPROGRESS;
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
goto out_put_neigh;
rc = x25_wait_for_connection_establishment(sk);
@@ -726,10 +727,10 @@ static int x25_wait_for_data(struct sock *sk, int timeout)
DECLARE_WAITQUEUE(wait, current);
int rc = 0;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
- if (sk->shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
@@ -738,7 +739,7 @@ static int x25_wait_for_data(struct sock *sk, int timeout)
if (!timeout)
break;
rc = 0;
- if (skb_queue_empty(&sk->receive_queue)) {
+ if (skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
@@ -746,7 +747,7 @@ static int x25_wait_for_data(struct sock *sk, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sleep, &wait);
+ remove_wait_queue(sk->sk_sleep, &wait);
return rc;
}
@@ -757,29 +758,29 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb;
int rc = -EINVAL;
- if (!sk || sk->state != TCP_LISTEN)
+ if (!sk || sk->sk_state != TCP_LISTEN)
goto out;
rc = -EOPNOTSUPP;
- if (sk->type != SOCK_SEQPACKET)
+ if (sk->sk_type != SOCK_SEQPACKET)
goto out;
- rc = x25_wait_for_data(sk, sk->rcvtimeo);
+ rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out;
- skb = skb_dequeue(&sk->receive_queue);
+ skb = skb_dequeue(&sk->sk_receive_queue);
rc = -EINVAL;
if (!skb->sk)
goto out;
- newsk = skb->sk;
- newsk->pair = NULL;
- newsk->socket = newsock;
- newsk->sleep = &newsock->wait;
+ newsk = skb->sk;
+ newsk->sk_pair = NULL;
+ newsk->sk_socket = newsock;
+ newsk->sk_sleep = &newsock->wait;
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
- sk->ack_backlog--;
+ sk->sk_ack_backlog--;
newsock->sk = newsk;
newsock->state = SS_CONNECTED;
rc = 0;
@@ -795,7 +796,7 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
struct x25_opt *x25 = x25_sk(sk);
if (peer) {
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
sx25->sx25_addr = x25->dest_addr;
} else
@@ -836,7 +837,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
/*
* We can't accept the Call Request.
*/
- if (!sk || sk->ack_backlog == sk->max_ack_backlog)
+ if (!sk || sk->sk_ack_backlog == sk->sk_max_ack_backlog)
goto out_clear_request;
/*
@@ -865,7 +866,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
skb_pull(skb, len);
skb->sk = make;
- make->state = TCP_ESTABLISHED;
+ make->sk_state = TCP_ESTABLISHED;
makex25 = x25_sk(make);
makex25->lci = lci;
@@ -887,17 +888,17 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
makex25->state = X25_STATE_3;
- sk->ack_backlog++;
- make->pair = sk;
+ sk->sk_ack_backlog++;
+ make->sk_pair = sk;
x25_insert_socket(make);
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue, skb);
x25_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skb->len);
+ sk->sk_data_ready(sk, skb->len);
rc = 1;
sock_put(sk);
out:
@@ -930,11 +931,11 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
goto out;
rc = -EADDRNOTAVAIL;
- if (sk->zapped)
+ if (sk->sk_zapped)
goto out;
rc = -EPIPE;
- if (sk->shutdown & SEND_SHUTDOWN) {
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
goto out;
}
@@ -961,7 +962,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
* to SIGPIPE, EPIPE;
*/
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
sx25.sx25_family = AF_X25;
@@ -1046,7 +1047,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
rc = -ENOTCONN;
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out_kfree_skb;
if (msg->msg_flags & MSG_OOB)
@@ -1101,7 +1102,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
- if (sk->state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if (flags & MSG_OOB) {
@@ -1183,8 +1184,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case TIOCOUTQ: {
- int amount;
- amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ int amount = sk->sk_sndbuf -
+ atomic_read(&sk->sk_wmem_alloc);
if (amount < 0)
amount = 0;
rc = put_user(amount, (unsigned int *)arg);
@@ -1198,7 +1199,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
* These two are safe on a single CPU system as
* only user tasks fiddle here
*/
- if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
rc = put_user(amount, (unsigned int *)arg);
break;
@@ -1207,9 +1208,9 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCGSTAMP:
if (sk) {
rc = -ENOENT;
- if (!sk->stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
break;
- rc = copy_to_user((void *)arg, &sk->stamp,
+ rc = copy_to_user((void *)arg, &sk->sk_stamp,
sizeof(struct timeval)) ? -EFAULT : 0;
}
rc = -EINVAL;
@@ -1256,7 +1257,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
sizeof(facilities)))
break;
rc = -EINVAL;
- if (sk->state != TCP_LISTEN && sk->state != TCP_CLOSE)
+ if (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_CLOSE)
break;
if (facilities.pacsize_in < X25_PS16 ||
facilities.pacsize_in > X25_PS4096)
@@ -1360,7 +1362,7 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
write_lock_bh(&x25_list_lock);
- for (s = x25_list; s; s = s->next)
+ for (s = x25_list; s; s = s->sk_next)
if (x25_sk(s)->neighbour == nb)
x25_disconnect(s, ENETUNREACH, 0, 0);
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index ac921f93699f..802bc1ae252b 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -85,9 +85,9 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
}
skb_set_owner_r(skbn, sk);
- skb_queue_tail(&sk->receive_queue, skbn);
+ skb_queue_tail(&sk->sk_receive_queue, skbn);
if (!sock_flag(sk, SOCK_DEAD))
- sk->data_ready(sk, skbn->len);
+ sk->sk_data_ready(sk, skbn->len);
return 0;
}
@@ -112,7 +112,7 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
x25->vr = 0;
x25->vl = 0;
x25->state = X25_STATE_3;
- sk->state = TCP_ESTABLISHED;
+ sk->sk_state = TCP_ESTABLISHED;
/*
* Parse the data in the frame.
*/
@@ -130,7 +130,7 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
x25->calluserdata.cudlength = skb->len;
}
if (!sock_flag(sk, SOCK_DEAD))
- sk->state_change(sk);
+ sk->sk_state_change(sk);
break;
}
case X25_CLEAR_REQUEST:
@@ -255,7 +255,8 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
x25->state = X25_STATE_4;
break;
}
- if (atomic_read(&sk->rmem_alloc) > (sk->rcvbuf / 2))
+ if (atomic_read(&sk->sk_rmem_alloc) >
+ (sk->sk_rcvbuf / 2))
x25->condition |= X25_COND_OWN_RX_BUSY;
}
/*
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
index 97048413829b..ed6b6d84b3d0 100644
--- a/net/x25/x25_out.c
+++ b/net/x25/x25_out.c
@@ -111,13 +111,13 @@ int x25_output(struct sock *sk, struct sk_buff *skb)
skbn->data[2] |= X25_STD_M_BIT;
}
- skb_queue_tail(&sk->write_queue, skbn);
+ skb_queue_tail(&sk->sk_write_queue, skbn);
sent += len;
}
kfree_skb(skb);
} else {
- skb_queue_tail(&sk->write_queue, skb);
+ skb_queue_tail(&sk->sk_write_queue, skb);
sent = skb->len - header_len;
}
return sent;
@@ -169,7 +169,7 @@ void x25_kick(struct sock *sk)
if (x25->condition & X25_COND_PEER_RX_BUSY)
return;
- if (skb_peek(&sk->write_queue) == NULL)
+ if (!skb_peek(&sk->sk_write_queue))
return;
modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
@@ -187,11 +187,11 @@ void x25_kick(struct sock *sk)
* the window is full.
*/
- skb = skb_dequeue(&sk->write_queue);
+ skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
break;
}
@@ -209,7 +209,8 @@ void x25_kick(struct sock *sk)
*/
skb_queue_tail(&x25->ack_queue, skb);
- } while (x25->vs != end && (skb = skb_dequeue(&sk->write_queue)) != NULL);
+ } while (x25->vs != end &&
+ (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
x25->vl = x25->vr;
x25->condition &= ~X25_COND_ACK_PENDING;
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 686506f39e7f..a416ae53b10b 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -94,7 +94,7 @@ static __inline__ struct sock *x25_get_socket_idx(loff_t pos)
{
struct sock *s;
- for (s = x25_list; pos && s; s = s->next)
+ for (s = x25_list; pos && s; s = s->sk_next)
--pos;
return s;
@@ -120,7 +120,7 @@ static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
s = v;
- s = s->next;
+ s = s->sk_next;
out:
return s;
}
@@ -158,8 +158,9 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
- atomic_read(&s->wmem_alloc), atomic_read(&s->rmem_alloc),
- s->socket ? SOCK_INODE(s->socket)->i_ino : 0L);
+ atomic_read(&s->sk_wmem_alloc),
+ atomic_read(&s->sk_rmem_alloc),
+ s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
out:
return 0;
}
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 99728136e2ba..c40d4c5d46a1 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -49,7 +49,7 @@ void x25_clear_queues(struct sock *sk)
{
struct x25_opt *x25 = x25_sk(sk);
- skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&x25->ack_queue);
skb_queue_purge(&x25->interrupt_in_queue);
skb_queue_purge(&x25->interrupt_out_queue);
@@ -90,7 +90,7 @@ void x25_requeue_frames(struct sock *sk)
*/
while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
if (!skb_prev)
- skb_queue_head(&sk->write_queue, skb);
+ skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb);
skb_prev = skb;
@@ -340,12 +340,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
x25->causediag.cause = cause;
x25->causediag.diagnostic = diagnostic;
- sk->state = TCP_CLOSE;
- sk->err = reason;
- sk->shutdown |= SEND_SHUTDOWN;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = reason;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
- sk->state_change(sk);
+ sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
@@ -358,7 +358,7 @@ void x25_check_rbuf(struct sock *sk)
{
struct x25_opt *x25 = x25_sk(sk);
- if (atomic_read(&sk->rmem_alloc) < (sk->rcvbuf / 2) &&
+ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(x25->condition & X25_COND_OWN_RX_BUSY)) {
x25->condition &= ~X25_COND_OWN_RX_BUSY;
x25->condition &= ~X25_COND_ACK_PENDING;
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index 815c29cba599..9792c7d1f98a 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -45,18 +45,18 @@ static void x25_timer_expiry(unsigned long);
void x25_start_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
- sk->timer.data = (unsigned long)sk;
- sk->timer.function = &x25_heartbeat_expiry;
- sk->timer.expires = jiffies + 5 * HZ;
+ sk->sk_timer.data = (unsigned long)sk;
+ sk->sk_timer.function = &x25_heartbeat_expiry;
+ sk->sk_timer.expires = jiffies + 5 * HZ;
- add_timer(&sk->timer);
+ add_timer(&sk->sk_timer);
}
void x25_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->timer);
+ del_timer(&sk->sk_timer);
}
void x25_start_t2timer(struct sock *sk)
@@ -143,7 +143,7 @@ static void x25_heartbeat_expiry(unsigned long param)
* get removed.
*/
if (sock_flag(sk, SOCK_DESTROY) ||
- (sk->state == TCP_LISTEN &&
+ (sk->sk_state == TCP_LISTEN &&
sock_flag(sk, SOCK_DEAD))) {
x25_destroy_socket(sk);
goto unlock;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8af65b7754fe..2ab46e487b0d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -410,10 +410,9 @@ struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi
struct xfrm_policy *pol;
read_lock_bh(&xfrm_policy_lock);
- if ((pol = sk->policy[dir]) != NULL) {
- int match;
-
- match = xfrm_selector_match(&pol->selector, fl, sk->family);
+ if ((pol = sk->sk_policy[dir]) != NULL) {
+ int match = xfrm_selector_match(&pol->selector, fl,
+ sk->sk_family);
if (match)
xfrm_pol_hold(pol);
else
@@ -449,8 +448,8 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
struct xfrm_policy *old_pol;
write_lock_bh(&xfrm_policy_lock);
- old_pol = sk->policy[dir];
- sk->policy[dir] = pol;
+ old_pol = sk->sk_policy[dir];
+ sk->sk_policy[dir] = pol;
if (pol) {
pol->curlft.add_time = (unsigned long)xtime.tv_sec;
pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
@@ -490,14 +489,13 @@ static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
int __xfrm_sk_clone_policy(struct sock *sk)
{
- struct xfrm_policy *p0, *p1;
- p0 = sk->policy[0];
- p1 = sk->policy[1];
- sk->policy[0] = NULL;
- sk->policy[1] = NULL;
- if (p0 && (sk->policy[0] = clone_policy(p0, 0)) == NULL)
+ struct xfrm_policy *p0 = sk->sk_policy[0],
+ *p1 = sk->sk_policy[1];
+
+ sk->sk_policy[0] = sk->sk_policy[1] = NULL;
+ if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
return -ENOMEM;
- if (p1 && (sk->policy[1] = clone_policy(p1, 1)) == NULL)
+ if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
return -ENOMEM;
return 0;
}
@@ -643,7 +641,7 @@ int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
restart:
genid = atomic_read(&flow_cache_genid);
policy = NULL;
- if (sk && sk->policy[1])
+ if (sk && sk->sk_policy[1])
policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
if (!policy) {
@@ -831,7 +829,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
pol = NULL;
- if (sk && sk->policy[dir])
+ if (sk && sk->sk_policy[dir])
pol = xfrm_sk_policy_lookup(sk, dir, &fl);
if (!pol)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 7633fcf15647..08a62ba505c3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -701,7 +701,8 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 *optval, int optlen)
err = -EINVAL;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
- pol = km->compile_policy(sk->family, optname, data, optlen, &err);
+ pol = km->compile_policy(sk->sk_family, optname, data,
+ optlen, &err);
if (err >= 0)
break;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index f7609cfe3bec..b142693abc7c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -946,10 +946,11 @@ static void xfrm_netlink_rcv(struct sock *sk, int len)
down(&xfrm_cfg_sem);
- while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (xfrm_user_rcv_skb(skb)) {
if (skb->len)
- skb_queue_head(&sk->receive_queue, skb);
+ skb_queue_head(&sk->sk_receive_queue,
+ skb);
else
kfree_skb(skb);
break;
@@ -959,7 +960,7 @@ static void xfrm_netlink_rcv(struct sock *sk, int len)
up(&xfrm_cfg_sem);
- } while (xfrm_nl && xfrm_nl->receive_queue.qlen);
+ } while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen);
}
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)
@@ -1126,7 +1127,7 @@ static int __init xfrm_user_init(void)
static void __exit xfrm_user_exit(void)
{
xfrm_unregister_km(&netlink_mgr);
- sock_release(xfrm_nl->socket);
+ sock_release(xfrm_nl->sk_socket);
}
module_init(xfrm_user_init);