|  |  |  |
|---|---|---|
| author | Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 2003-06-04 16:00:32 -0300 |
| committer | Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 2003-06-04 16:00:32 -0300 |
| commit | 63413da5d31d99ba8d89cf9d9ff997f93b49443d | |
| tree | 4add196b24e554f140663a7bd92260a7fd9ebdc4 /net/ipv4/tcp_minisocks.c | |
| parent | fbf47415732b00ff5578d55c6be779a109a01e1f | |
net: create struct sock_common and use it in struct sock & tcp_tw_bucket
With this the data dependency is reduced to just making sure that the first
member of both struct sock and struct tcp_tw_bucket is a struct sock_common.
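
For readers who want to see what that shared layout buys, here is a minimal, self-contained sketch of the pattern. It is not the kernel's actual definitions: the skc_-prefixed fields, the __sk_common/__tw_common member names, and the helper are illustrative assumptions; only the idea, an identical struct sock_common at offset zero of both structs, comes from the commit.

```c
/* sock_common_sketch.c — illustrative only, not the kernel's definitions. */
#include <stdio.h>

#define TCP_TIME_WAIT 6	/* illustrative stand-in for the kernel constant */

struct sock_common {
	unsigned short skc_family;	/* address family (AF_INET, AF_INET6, ...) */
	unsigned char  skc_state;	/* connection state (ESTABLISHED, TIME_WAIT, ...) */
	int            skc_bound_dev_if;	/* bound device index, if any */
};

struct sock {
	struct sock_common __sk_common;	/* must stay the first member */
	int sk_rcvbuf;			/* stand-in for the rest of the full socket */
};

struct tcp_tw_bucket {
	struct sock_common __tw_common;	/* must stay the first member */
	int tw_timeout;			/* stand-in for the minimal TIME-WAIT state */
};

/* Code that only touches the common fields can look at either object through
 * a struct sock_common pointer — this is the whole data dependency the
 * commit message talks about. */
static int in_time_wait(const void *sk_or_tw)
{
	return ((const struct sock_common *)sk_or_tw)->skc_state == TCP_TIME_WAIT;
}

int main(void)
{
	struct sock sk = { .__sk_common = { .skc_state = 1 /* ESTABLISHED */ } };
	struct tcp_tw_bucket tw = { .__tw_common = { .skc_state = TCP_TIME_WAIT } };

	printf("sk: %d, tw: %d\n", in_time_wait(&sk), in_time_wait(&tw));
	return 0;
}
```

The diff below relies on exactly this: __tcp_tw_hashdance() casts the tcp_tw_bucket to struct sock * (sktw = (struct sock *)tw) before linking it into the established hash chain.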
This also makes it easier to grep for struct sock and struct tcp_tw_bucket usage
in the tree, as all the members in those structs are now prefixed with sk_ and
tw_ respectively, like struct inode (i_), struct block_device (bd_), etc.
Checked the namespace with make tags/ctags; there was just one collision,
between the macros for the struct sock members and a wanrouter struct.
Fixed with s/sk_state/state_sk/g in that wanrouter struct.
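
The ctags collision is easiest to see with a small example. The sketch below is hypothetical: the macro body and the wanrouter-like struct are assumptions made for illustration, not code copied from this commit.

```c
/* Hypothetical sketch of the namespace problem, not actual kernel code. */
struct sock_common { unsigned char skc_state; };
struct sock        { struct sock_common __sk_common; };

/* Members reached through the struct sock accessor macros become textual
 * macros visible everywhere the socket header is included: */
#define sk_state __sk_common.skc_state

/* Because macro expansion is purely textual, an unrelated struct can no
 * longer have a member of the same name: 'int sk_state;' would expand to
 * 'int __sk_common.skc_state;' and fail to compile.  Hence the rename in
 * the wanrouter struct: */
struct wanrouter_device_sketch {
	int state_sk;	/* was 'sk_state' before the s/sk_state/state_sk/g fix */
};
```

Running make tags/ctags over the tree is a cheap way to spot such clashes, since it indexes the macro and the conflicting struct member under the same name.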
Also checked whether the names of the members in both structs collided with
any macro; none were found.
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
|  |  |  |
|---|---|---|
| -rw-r--r-- | net/ipv4/tcp_minisocks.c | 261 |

1 file changed, 132 insertions(+), 129 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index ae762b5c5d7a..e0f6ea8075c1 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -61,32 +61,33 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw) struct tcp_bind_bucket *tb; /* Unlink from established hashes. */ - ehead = &tcp_ehash[tw->hashent]; + ehead = &tcp_ehash[tw->tw_hashent]; write_lock(&ehead->lock); - if (!tw->pprev) { + if (!tw->tw_pprev) { write_unlock(&ehead->lock); return; } - if(tw->next) - tw->next->pprev = tw->pprev; - *(tw->pprev) = tw->next; - tw->pprev = NULL; + if (tw->tw_next) + tw->tw_next->sk_pprev = tw->tw_pprev; + *(tw->tw_pprev) = tw->tw_next; + tw->tw_pprev = NULL; write_unlock(&ehead->lock); /* Disassociate with bind bucket. */ - bhead = &tcp_bhash[tcp_bhashfn(tw->num)]; + bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)]; spin_lock(&bhead->lock); - tb = tw->tb; - if(tw->bind_next) - tw->bind_next->bind_pprev = tw->bind_pprev; - *(tw->bind_pprev) = tw->bind_next; - tw->tb = NULL; + tb = tw->tw_tb; + if (tw->tw_bind_next) + tw->tw_bind_next->sk_bind_pprev = tw->tw_bind_pprev; + *(tw->tw_bind_pprev) = tw->tw_bind_next; + tw->tw_tb = NULL; tcp_bucket_destroy(tb); spin_unlock(&bhead->lock); #ifdef INET_REFCNT_DEBUG - if (atomic_read(&tw->refcnt) != 1) { - printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw, atomic_read(&tw->refcnt)); + if (atomic_read(&tw->tw_refcnt) != 1) { + printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw, + atomic_read(&tw->tw_refcnt)); } #endif tcp_tw_put(tw); @@ -128,33 +129,34 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, int paws_reject = 0; tp.saw_tstamp = 0; - if (th->doff > (sizeof(struct tcphdr)>>2) && tw->ts_recent_stamp) { + if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) { tcp_parse_options(skb, &tp, 0); if (tp.saw_tstamp) { - tp.ts_recent = tw->ts_recent; - tp.ts_recent_stamp = tw->ts_recent_stamp; + tp.ts_recent = tw->tw_ts_recent; + tp.ts_recent_stamp = tw->tw_ts_recent_stamp; paws_reject = tcp_paws_check(&tp, th->rst); } } - if (tw->substate == TCP_FIN_WAIT2) { + if (tw->tw_substate == TCP_FIN_WAIT2) { /* Just repeat all the checks of tcp_rcv_state_process() */ /* Out of window, send ACK */ if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, - tw->rcv_nxt, tw->rcv_nxt + tw->rcv_wnd)) + tw->tw_rcv_nxt, + tw->tw_rcv_nxt + tw->tw_rcv_wnd)) return TCP_TW_ACK; if (th->rst) goto kill; - if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->rcv_nxt)) + if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt)) goto kill_with_rst; /* Dup ACK? */ - if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt) || + if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) || TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { tcp_tw_put(tw); return TCP_TW_SUCCESS; @@ -163,7 +165,8 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, /* New data or FIN. If new data arrive after half-duplex close, * reset. */ - if (!th->fin || TCP_SKB_CB(skb)->end_seq != tw->rcv_nxt+1) { + if (!th->fin || + TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) { kill_with_rst: tcp_tw_deschedule(tw); tcp_tw_put(tw); @@ -171,11 +174,11 @@ kill_with_rst: } /* FIN arrived, enter true time-wait state. 
*/ - tw->substate = TCP_TIME_WAIT; - tw->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + tw->tw_substate = TCP_TIME_WAIT; + tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (tp.saw_tstamp) { - tw->ts_recent_stamp = xtime.tv_sec; - tw->ts_recent = tp.rcv_tsval; + tw->tw_ts_recent_stamp = xtime.tv_sec; + tw->tw_ts_recent = tp.rcv_tsval; } /* I am shamed, but failed to make it more elegant. @@ -183,10 +186,10 @@ kill_with_rst: * to generalize to IPv6. Taking into account that IPv6 * do not undertsnad recycling in any case, it not * a big problem in practice. --ANK */ - if (tw->family == AF_INET && - sysctl_tcp_tw_recycle && tw->ts_recent_stamp && + if (tw->tw_family == AF_INET && + sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp && tcp_v4_tw_remember_stamp(tw)) - tcp_tw_schedule(tw, tw->timeout); + tcp_tw_schedule(tw, tw->tw_timeout); else tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); return TCP_TW_ACK; @@ -210,7 +213,7 @@ kill_with_rst: */ if (!paws_reject && - (TCP_SKB_CB(skb)->seq == tw->rcv_nxt && + (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt && (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { /* In window segment, it may be only reset or bare ack. */ @@ -229,8 +232,8 @@ kill: tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); if (tp.saw_tstamp) { - tw->ts_recent = tp.rcv_tsval; - tw->ts_recent_stamp = xtime.tv_sec; + tw->tw_ts_recent = tp.rcv_tsval; + tw->tw_ts_recent_stamp = xtime.tv_sec; } tcp_tw_put(tw); @@ -255,9 +258,9 @@ kill: */ if (th->syn && !th->rst && !th->ack && !paws_reject && - (after(TCP_SKB_CB(skb)->seq, tw->rcv_nxt) || - (tp.saw_tstamp && (s32)(tw->ts_recent - tp.rcv_tsval) < 0))) { - u32 isn = tw->snd_nxt+65535+2; + (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) || + (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) { + u32 isn = tw->tw_snd_nxt + 65535 + 2; if (isn == 0) isn++; TCP_SKB_CB(skb)->when = isn; @@ -293,7 +296,7 @@ kill: */ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) { - struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->hashent]; + struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent]; struct tcp_bind_hashbucket *bhead; struct sock **head, *sktw; @@ -303,33 +306,33 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) */ bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)]; spin_lock(&bhead->lock); - tw->tb = (struct tcp_bind_bucket *)sk->prev; - BUG_TRAP(sk->prev!=NULL); - if ((tw->bind_next = tw->tb->owners) != NULL) - tw->tb->owners->bind_pprev = &tw->bind_next; - tw->tb->owners = (struct sock*)tw; - tw->bind_pprev = &tw->tb->owners; + tw->tw_tb = (struct tcp_bind_bucket *)sk->sk_prev; + BUG_TRAP(sk->sk_prev); + if ((tw->tw_bind_next = tw->tw_tb->owners) != NULL) + tw->tw_tb->owners->sk_bind_pprev = &tw->tw_bind_next; + tw->tw_tb->owners = (struct sock *)tw; + tw->tw_bind_pprev = &tw->tw_tb->owners; spin_unlock(&bhead->lock); write_lock(&ehead->lock); /* Step 2: Remove SK from established hash. */ - if (sk->pprev) { - if(sk->next) - sk->next->pprev = sk->pprev; - *sk->pprev = sk->next; - sk->pprev = NULL; - sock_prot_dec_use(sk->prot); + if (sk->sk_pprev) { + if (sk->sk_next) + sk->sk_next->sk_pprev = sk->sk_pprev; + *sk->sk_pprev = sk->sk_next; + sk->sk_pprev = NULL; + sock_prot_dec_use(sk->sk_prot); } /* Step 3: Hash TW into TIMEWAIT half of established hash table. 
*/ head = &(ehead + tcp_ehash_size)->chain; sktw = (struct sock *)tw; - if((sktw->next = *head) != NULL) - (*head)->pprev = &sktw->next; + if ((sktw->sk_next = *head) != NULL) + (*head)->sk_pprev = &sktw->sk_next; *head = sktw; - sktw->pprev = head; - atomic_inc(&tw->refcnt); + sktw->sk_pprev = head; + atomic_inc(&tw->tw_refcnt); write_unlock(&ehead->lock); } @@ -354,33 +357,33 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) int rto = (tp->rto<<2) - (tp->rto>>1); /* Give us an identity. */ - tw->daddr = inet->daddr; - tw->rcv_saddr = inet->rcv_saddr; - tw->bound_dev_if= sk->bound_dev_if; - tw->num = inet->num; - tw->state = TCP_TIME_WAIT; - tw->substate = state; - tw->sport = inet->sport; - tw->dport = inet->dport; - tw->family = sk->family; - tw->reuse = sk->reuse; - tw->rcv_wscale = tp->rcv_wscale; - atomic_set(&tw->refcnt, 1); - - tw->hashent = sk->hashent; - tw->rcv_nxt = tp->rcv_nxt; - tw->snd_nxt = tp->snd_nxt; - tw->rcv_wnd = tcp_receive_window(tp); - tw->ts_recent = tp->ts_recent; - tw->ts_recent_stamp= tp->ts_recent_stamp; - tw->pprev_death = NULL; + tw->tw_daddr = inet->daddr; + tw->tw_rcv_saddr = inet->rcv_saddr; + tw->tw_bound_dev_if = sk->sk_bound_dev_if; + tw->tw_num = inet->num; + tw->tw_state = TCP_TIME_WAIT; + tw->tw_substate = state; + tw->tw_sport = inet->sport; + tw->tw_dport = inet->dport; + tw->tw_family = sk->sk_family; + tw->tw_reuse = sk->sk_reuse; + tw->tw_rcv_wscale = tp->rcv_wscale; + atomic_set(&tw->tw_refcnt, 1); + + tw->tw_hashent = sk->sk_hashent; + tw->tw_rcv_nxt = tp->rcv_nxt; + tw->tw_snd_nxt = tp->snd_nxt; + tw->tw_rcv_wnd = tcp_receive_window(tp); + tw->tw_ts_recent = tp->ts_recent; + tw->tw_ts_recent_stamp = tp->ts_recent_stamp; + tw->tw_pprev_death = NULL; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - if(tw->family == PF_INET6) { + if (tw->tw_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - ipv6_addr_copy(&tw->v6_daddr, &np->daddr); - ipv6_addr_copy(&tw->v6_rcv_saddr, &np->rcv_saddr); + ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr); + ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr); } #endif /* Linkage updates. 
*/ @@ -391,9 +394,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) timeo = rto; if (recycle_ok) { - tw->timeout = rto; + tw->tw_timeout = rto; } else { - tw->timeout = TCP_TIMEWAIT_LEN; + tw->tw_timeout = TCP_TIMEWAIT_LEN; if (state == TCP_TIME_WAIT) timeo = TCP_TIMEWAIT_LEN; } @@ -443,10 +446,10 @@ static void tcp_twkill(unsigned long dummy) goto out; while((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) { - tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death; - if (tw->next_death) - tw->next_death->pprev_death = tw->pprev_death; - tw->pprev_death = NULL; + tcp_tw_death_row[tcp_tw_death_row_slot] = tw->tw_next_death; + if (tw->tw_next_death) + tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death; + tw->tw_pprev_death = NULL; spin_unlock(&tw_death_lock); tcp_timewait_kill(tw); @@ -474,11 +477,11 @@ out: void tcp_tw_deschedule(struct tcp_tw_bucket *tw) { spin_lock(&tw_death_lock); - if (tw->pprev_death) { - if(tw->next_death) - tw->next_death->pprev_death = tw->pprev_death; - *tw->pprev_death = tw->next_death; - tw->pprev_death = NULL; + if (tw->tw_pprev_death) { + if (tw->tw_next_death) + tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death; + *tw->tw_pprev_death = tw->tw_next_death; + tw->tw_pprev_death = NULL; tcp_tw_put(tw); if (--tcp_tw_count == 0) del_timer(&tcp_tw_timer); @@ -530,14 +533,14 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo) spin_lock(&tw_death_lock); /* Unlink it, if it was scheduled */ - if (tw->pprev_death) { - if(tw->next_death) - tw->next_death->pprev_death = tw->pprev_death; - *tw->pprev_death = tw->next_death; - tw->pprev_death = NULL; + if (tw->tw_pprev_death) { + if (tw->tw_next_death) + tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death; + *tw->tw_pprev_death = tw->tw_next_death; + tw->tw_pprev_death = NULL; tcp_tw_count--; } else - atomic_inc(&tw->refcnt); + atomic_inc(&tw->tw_refcnt); if (slot >= TCP_TW_RECYCLE_SLOTS) { /* Schedule to slow timer */ @@ -548,11 +551,11 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo) if (slot >= TCP_TWKILL_SLOTS) slot = TCP_TWKILL_SLOTS-1; } - tw->ttd = jiffies + timeo; + tw->tw_ttd = jiffies + timeo; slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1); tpp = &tcp_tw_death_row[slot]; } else { - tw->ttd = jiffies + (slot<<TCP_TW_RECYCLE_TICK); + tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK); if (tcp_twcal_hand < 0) { tcp_twcal_hand = 0; @@ -567,10 +570,10 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo) tpp = &tcp_twcal_row[slot]; } - if((tw->next_death = *tpp) != NULL) - (*tpp)->pprev_death = &tw->next_death; + if ((tw->tw_next_death = *tpp) != NULL) + (*tpp)->tw_pprev_death = &tw->tw_next_death; *tpp = tw; - tw->pprev_death = tpp; + tw->tw_pprev_death = tpp; if (tcp_tw_count++ == 0) mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD); @@ -597,8 +600,8 @@ void tcp_twcal_tick(unsigned long dummy) struct tcp_tw_bucket *tw; while((tw = tcp_twcal_row[slot]) != NULL) { - tcp_twcal_row[slot] = tw->next_death; - tw->pprev_death = NULL; + tcp_twcal_row[slot] = tw->tw_next_death; + tw->tw_pprev_death = NULL; tcp_timewait_kill(tw); tcp_tw_put(tw); @@ -639,18 +642,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, /* allocate the newsk from the same slab of the master sock, * if not, at sk_free time we'll try to free it from the wrong * slabcache (i.e. is it TCPv4 or v6?) 
-acme */ - struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->slab); + struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_slab); if(newsk != NULL) { struct tcp_opt *newtp; struct sk_filter *filter; memcpy(newsk, sk, sizeof(struct tcp_sock)); - newsk->state = TCP_SYN_RECV; + newsk->sk_state = TCP_SYN_RECV; /* SANITY */ - newsk->pprev = NULL; - newsk->prev = NULL; + newsk->sk_pprev = NULL; + newsk->sk_prev = NULL; /* Clone the TCP header template */ inet_sk(newsk)->dport = req->rmt_port; @@ -658,29 +661,29 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, sock_lock_init(newsk); bh_lock_sock(newsk); - newsk->dst_lock = RW_LOCK_UNLOCKED; - atomic_set(&newsk->rmem_alloc, 0); - skb_queue_head_init(&newsk->receive_queue); - atomic_set(&newsk->wmem_alloc, 0); - skb_queue_head_init(&newsk->write_queue); - atomic_set(&newsk->omem_alloc, 0); - newsk->wmem_queued = 0; - newsk->forward_alloc = 0; + newsk->sk_dst_lock = RW_LOCK_UNLOCKED; + atomic_set(&newsk->sk_rmem_alloc, 0); + skb_queue_head_init(&newsk->sk_receive_queue); + atomic_set(&newsk->sk_wmem_alloc, 0); + skb_queue_head_init(&newsk->sk_write_queue); + atomic_set(&newsk->sk_omem_alloc, 0); + newsk->sk_wmem_queued = 0; + newsk->sk_forward_alloc = 0; sock_reset_flag(newsk, SOCK_DONE); - newsk->userlocks = sk->userlocks & ~SOCK_BINDPORT_LOCK; - newsk->backlog.head = newsk->backlog.tail = NULL; - newsk->callback_lock = RW_LOCK_UNLOCKED; - skb_queue_head_init(&newsk->error_queue); - newsk->write_space = tcp_write_space; + newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; + newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; + newsk->sk_callback_lock = RW_LOCK_UNLOCKED; + skb_queue_head_init(&newsk->sk_error_queue); + newsk->sk_write_space = tcp_write_space; - if ((filter = newsk->filter) != NULL) + if ((filter = newsk->sk_filter) != NULL) sk_filter_charge(newsk, filter); if (unlikely(xfrm_sk_clone_policy(newsk))) { /* It is still raw copy of parent, so invalidate * destructor and make plain sk_free() */ - newsk->destruct = NULL; + newsk->sk_destruct = NULL; sk_free(newsk); return NULL; } @@ -744,9 +747,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock)); /* Back to base struct sock members. 
*/ - newsk->err = 0; - newsk->priority = 0; - atomic_set(&newsk->refcnt, 2); + newsk->sk_err = 0; + newsk->sk_priority = 0; + atomic_set(&newsk->sk_refcnt, 2); #ifdef INET_REFCNT_DEBUG atomic_inc(&inet_sock_nr); #endif @@ -755,9 +758,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, if (sock_flag(newsk, SOCK_KEEPOPEN)) tcp_reset_keepalive_timer(newsk, keepalive_time_when(newtp)); - newsk->socket = NULL; - newsk->sleep = NULL; - newsk->owner = NULL; + newsk->sk_socket = NULL; + newsk->sk_sleep = NULL; + newsk->sk_owner = NULL; newtp->tstamp_ok = req->tstamp_ok; if((newtp->sack_ok = req->sack_ok) != 0) { @@ -791,7 +794,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, newtp->mss_clamp = req->mss; TCP_ECN_openreq_child(newtp, req); if (newtp->ecn_flags&TCP_ECN_OK) - newsk->no_largesend = 1; + newsk->sk_no_largesend = 1; TCP_INC_STATS_BH(TcpPassiveOpens); } @@ -967,7 +970,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, if (child == NULL) goto listen_overflow; - sk_set_owner(child, sk->owner); + sk_set_owner(child, sk->sk_owner); tcp_synq_unlink(tp, req, prev); tcp_synq_removed(sk, req); @@ -999,14 +1002,14 @@ int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb) { int ret = 0; - int state = child->state; + int state = child->sk_state; if (!sock_owned_by_user(child)) { ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len); /* Wakeup parent, send SIGIO */ - if (state == TCP_SYN_RECV && child->state != state) - parent->data_ready(parent, 0); + if (state == TCP_SYN_RECV && child->sk_state != state) + parent->sk_data_ready(parent, 0); } else { /* Alas, it is possible again, because we do lookup * in main socket hash table and lock on listening |
