| author | Jody McIntyre <scjody@gribbl.modernduck.com> | 2005-02-22 14:32:08 -0500 |
|---|---|---|
| committer | Jody McIntyre <scjody@modernduck.com> | 2005-02-22 14:32:08 -0500 |
| commit | c62d34883db103208055167e0b15ceb97a947859 (patch) | |
| tree | a2458026eb37ded7633502acc9df15b872c631a9 | |
| parent | bdc1a5f487e06a0e07f2867d214722dee09916d6 (diff) | |
| parent | 9f459dee9e73e90a0a357e5bf53b4bc56282c1da (diff) | |
Merge gribbl.modernduck.com:/usr/src/bk/linux-2.5
into gribbl.modernduck.com:/usr/src/bk/1394-2.6
| -rw-r--r-- | arch/ppc/mm/pgtable.c | 5 |
|---|---|---|
| -rw-r--r-- | fs/nfs/file.c | 6 |
| -rw-r--r-- | include/linux/fs.h | 35 |
| -rw-r--r-- | include/linux/tcp.h | 57 |
| -rw-r--r-- | include/net/tcp.h | 58 |
| -rw-r--r-- | net/ipv4/tcp.c | 18 |
| -rw-r--r-- | net/ipv4/tcp_input.c | 230 |
| -rw-r--r-- | net/ipv4/tcp_ipv4.c | 56 |
| -rw-r--r-- | net/ipv4/tcp_minisocks.c | 176 |
| -rw-r--r-- | net/ipv4/tcp_output.c | 48 |
| -rw-r--r-- | net/ipv4/tcp_timer.c | 2 |
| -rw-r--r-- | net/ipv6/tcp_ipv6.c | 27 |
12 files changed, 364 insertions, 354 deletions
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index bda12bd5786f..b5316b479228 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -107,8 +107,11 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
             ptepage->mapping = (void *) mm;
             ptepage->index = address & PMD_MASK;
         }
-    } else
+    } else {
         pte = (pte_t *)early_get_page();
+        if (pte)
+            clear_page(pte);
+    }
     return pte;
 }
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index a6860e1f7bff..57ccbd277fce 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -303,10 +303,10 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
         status = NFS_PROTO(inode)->lock(filp, cmd, fl);
     else {
         struct file_lock *cfl = posix_test_lock(filp, fl);
-        if (cfl != NULL) {
+
+        fl->fl_type = F_UNLCK;
+        if (cfl != NULL)
             memcpy(fl, cfl, sizeof(*fl));
-            fl->fl_type = F_UNLCK;
-        }
     }
     unlock_kernel();
     return status;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 84b3b114f943..30cb440cb6c8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -7,25 +7,8 @@
  */

 #include <linux/config.h>
-#include <linux/linkage.h>
 #include <linux/limits.h>
-#include <linux/wait.h>
-#include <linux/types.h>
-#include <linux/kdev_t.h>
 #include <linux/ioctl.h>
-#include <linux/dcache.h>
-#include <linux/stat.h>
-#include <linux/cache.h>
-#include <linux/kobject.h>
-#include <asm/atomic.h>
-
-struct iovec;
-struct nameidata;
-struct pipe_inode_info;
-struct poll_table_struct;
-struct kstatfs;
-struct vm_area_struct;
-struct vfsmount;

 /*
  * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -216,14 +199,32 @@ extern int dir_notify_enable;

 #ifdef __KERNEL__

+#include <linux/linkage.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/dcache.h>
+#include <linux/stat.h>
+#include <linux/cache.h>
+#include <linux/kobject.h>
 #include <linux/list.h>
 #include <linux/radix-tree.h>
 #include <linux/prio_tree.h>
 #include <linux/audit.h>
 #include <linux/init.h>
+
+#include <asm/atomic.h>
 #include <asm/semaphore.h>
 #include <asm/byteorder.h>

+struct iovec;
+struct nameidata;
+struct pipe_inode_info;
+struct poll_table_struct;
+struct kstatfs;
+struct vm_area_struct;
+struct vfsmount;
+
 /* Used to be a macro which just called the function, now just a function */
 extern void update_atime (struct inode *);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b31ca0400372..f56e734c1da0 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -210,6 +210,27 @@ enum tcp_congestion_algo {
     TCP_BIC,
 };

+struct tcp_options_received {
+/* PAWS/RTTM data */
+    long    ts_recent_stamp;/* Time we stored ts_recent (for aging) */
+    __u32   ts_recent;      /* Time stamp to echo next */
+    __u32   rcv_tsval;      /* Time stamp value */
+    __u32   rcv_tsecr;      /* Time stamp echo reply */
+    char    saw_tstamp;     /* Saw TIMESTAMP on last packet */
+    char    tstamp_ok;      /* TIMESTAMP seen on SYN packet */
+    char    sack_ok;        /* SACK seen on SYN packet */
+    char    wscale_ok;      /* Wscale seen on SYN packet */
+    __u8    snd_wscale;     /* Window scaling received from sender */
+    __u8    rcv_wscale;     /* Window scaling to send to receiver */
+/* SACKs data */
+    __u8    dsack;          /* D-SACK is scheduled */
+    __u8    eff_sacks;      /* Size of SACK array to send with next packet */
+    __u8    num_sacks;      /* Number of SACK blocks */
+    __u8    __pad;
+    __u16   user_mss;       /* mss requested by user in ioctl */
+    __u16   mss_clamp;      /* Maximal mss, negotiated at connection setup */
+};
+
 struct tcp_sock {
     /* inet_sock has to be the first member of tcp_sock */
     struct inet_sock    inet;
@@ -262,22 +283,19 @@ struct tcp_sock {
     __u32   pmtu_cookie;    /* Last pmtu seen by socket */
     __u32   mss_cache;      /* Cached effective mss, not including SACKS */
     __u16   mss_cache_std;  /* Like mss_cache, but without TSO */
-    __u16   mss_clamp;      /* Maximal mss, negotiated at connection setup */
     __u16   ext_header_len; /* Network protocol overhead (IP/IPv6 options) */
     __u16   ext2_header_len;/* Options depending on route */
     __u8    ca_state;       /* State of fast-retransmit machine */
     __u8    retransmits;    /* Number of unrecovered RTO timeouts. */

+    __u32   frto_highmark;  /* snd_nxt when RTO occurred */
     __u8    reordering;     /* Packet reordering metric. */
     __u8    frto_counter;   /* Number of new acks after RTO */
-    __u32   frto_highmark;  /* snd_nxt when RTO occurred */

     __u8    adv_cong;       /* Using Vegas, Westwood, or BIC */
     __u8    defer_accept;   /* User waits for some data after accept() */
-    /* one byte hole, try to pack */

 /* RTT measurement */
-    __u8    backoff;        /* backoff */
     __u32   srtt;           /* smoothed round trip time << 3 */
     __u32   mdev;           /* medium deviation */
     __u32   mdev_max;       /* maximal mdev for the last rtt period */
@@ -288,7 +306,15 @@ struct tcp_sock {
     __u32   packets_out;    /* Packets which are "in flight" */
     __u32   left_out;       /* Packets which leaved network */
     __u32   retrans_out;    /* Retransmitted packets out */
+    __u8    backoff;        /* backoff */
+/*
+ *  Options received (usually on last packet, some only on SYN packets).
+ */
+    __u8    nonagle;        /* Disable Nagle algorithm? */
+    __u8    keepalive_probes; /* num of allowed keep alive probes */
+    __u8    probes_out;     /* unanswered 0 window probes */
+    struct tcp_options_received rx_opt;

 /*
  *  Slow start and congestion control (see also Nagle, and Karn & Partridge)
@@ -314,40 +340,19 @@ struct tcp_sock {
     __u32   write_seq;      /* Tail(+1) of data held in tcp send buffer */
     __u32   pushed_seq;     /* Last pushed seq, required to talk to windows */
     __u32   copied_seq;     /* Head of yet unread data */
-/*
- *  Options received (usually on last packet, some only on SYN packets).
- */
-    char    tstamp_ok,      /* TIMESTAMP seen on SYN packet */
-            wscale_ok,      /* Wscale seen on SYN packet */
-            sack_ok;        /* SACK seen on SYN packet */
-    char    saw_tstamp;     /* Saw TIMESTAMP on last packet */
-    __u8    snd_wscale;     /* Window scaling received from sender */
-    __u8    rcv_wscale;     /* Window scaling to send to receiver */
-    __u8    nonagle;        /* Disable Nagle algorithm? */
-    __u8    keepalive_probes; /* num of allowed keep alive probes */
-
-/* PAWS/RTTM data */
-    __u32   rcv_tsval;      /* Time stamp value */
-    __u32   rcv_tsecr;      /* Time stamp echo reply */
-    __u32   ts_recent;      /* Time stamp to echo next */
-    long    ts_recent_stamp;/* Time we stored ts_recent (for aging) */

 /* SACKs data */
-    __u16   user_mss;       /* mss requested by user in ioctl */
-    __u8    dsack;          /* D-SACK is scheduled */
-    __u8    eff_sacks;      /* Size of SACK array to send with next packet */
     struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
     struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/

     __u32   window_clamp;   /* Maximal window to advertise */
     __u32   rcv_ssthresh;   /* Current window clamp */
-    __u8    probes_out;     /* unanswered 0 window probes */
-    __u8    num_sacks;      /* Number of SACK blocks */
     __u16   advmss;         /* Advertised MSS */

     __u8    syn_retries;    /* num of allowed syn retries */
     __u8    ecn_flags;      /* ECN status bits. */
     __u16   prior_ssthresh; /* ssthresh saved at recovery start */
+    __u16   __pad1;
     __u32   lost_out;       /* Lost packets */
     __u32   sacked_out;     /* SACK'd packets */
     __u32   fackets_out;    /* FACK'd packets */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 72f3775f27c0..7355606725d4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -832,9 +832,9 @@ static __inline__ void tcp_delack_init(struct tcp_sock *tp)
     memset(&tp->ack, 0, sizeof(tp->ack));
 }

-static inline void tcp_clear_options(struct tcp_sock *tp)
+static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 {
-    tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
+    rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
 }

 enum tcp_tw_status
@@ -883,7 +883,7 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
 extern int tcp_listen_start(struct sock *sk);

 extern void tcp_parse_options(struct sk_buff *skb,
-                              struct tcp_sock *tp,
+                              struct tcp_options_received *opt_rx,
                               int estab);

 /*
@@ -1071,7 +1071,7 @@ static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)

 static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
 {
-    __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
+    __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 }

 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
@@ -1323,7 +1323,7 @@ static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)

 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
-    if (tp->sack_ok &&
+    if (tp->rx_opt.sack_ok &&
         (tp->sacked_out >= tp->packets_out - tp->lost_out))
         tp->sacked_out = tp->packets_out - tp->lost_out;
     tp->left_out = tp->sacked_out + tp->lost_out;
@@ -1649,39 +1649,39 @@ static __inline__ void tcp_done(struct sock *sk)
     tcp_destroy_sock(sk);
 }

-static __inline__ void tcp_sack_reset(struct tcp_sock *tp)
+static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
-    tp->dsack = 0;
-    tp->eff_sacks = 0;
-    tp->num_sacks = 0;
+    rx_opt->dsack = 0;
+    rx_opt->eff_sacks = 0;
+    rx_opt->num_sacks = 0;
 }

 static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
 {
-    if (tp->tstamp_ok) {
+    if (tp->rx_opt.tstamp_ok) {
         *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
         *ptr++ = htonl(tstamp);
-        *ptr++ = htonl(tp->ts_recent);
+        *ptr++ = htonl(tp->rx_opt.ts_recent);
     }
-    if (tp->eff_sacks) {
-        struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
+    if (tp->rx_opt.eff_sacks) {
+        struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
         int this_sack;

         *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_SACK << 8) |
                                   (TCPOLEN_SACK_BASE +
-                                   (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
-        for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
+                                   (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
+        for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
             *ptr++ = htonl(sp[this_sack].start_seq);
             *ptr++ = htonl(sp[this_sack].end_seq);
         }
-        if (tp->dsack) {
-            tp->dsack = 0;
-            tp->eff_sacks--;
+        if (tp->rx_opt.dsack) {
+            tp->rx_opt.dsack = 0;
+            tp->rx_opt.eff_sacks--;
         }
     }
 }
@@ -1827,17 +1827,17 @@ static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
 }

 static __inline__ void tcp_openreq_init(struct open_request *req,
-                                        struct tcp_sock *tp,
+                                        struct tcp_options_received *rx_opt,
                                         struct sk_buff *skb)
 {
     req->rcv_wnd = 0;           /* So that tcp_send_synack() knows! */
     req->rcv_isn = TCP_SKB_CB(skb)->seq;
-    req->mss = tp->mss_clamp;
-    req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
-    req->tstamp_ok = tp->tstamp_ok;
-    req->sack_ok = tp->sack_ok;
-    req->snd_wscale = tp->snd_wscale;
-    req->wscale_ok = tp->wscale_ok;
+    req->mss = rx_opt->mss_clamp;
+    req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+    req->tstamp_ok = rx_opt->tstamp_ok;
+    req->sack_ok = rx_opt->sack_ok;
+    req->snd_wscale = rx_opt->snd_wscale;
+    req->wscale_ok = rx_opt->wscale_ok;
     req->acked = 0;
     req->ecn_ok = 0;
     req->rmt_port = skb->h.th->source;
@@ -1886,11 +1886,11 @@ static inline int tcp_fin_time(const struct tcp_sock *tp)
     return fin_timeout;
 }

-static inline int tcp_paws_check(const struct tcp_sock *tp, int rst)
+static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
 {
-    if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
+    if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
         return 0;
-    if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
+    if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
         return 0;

     /* RST segments are not recommended to carry timestamp,
@@ -1905,7 +1905,7 @@ static inline int tcp_paws_check(const struct tcp_sock *tp, int rst)
        However, we can relax time bounds for RST segments to MSL.
      */
-    if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
+    if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
         return 0;
     return 1;
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d34035d63c75..0903d6ece86f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1829,8 +1829,8 @@ int tcp_disconnect(struct sock *sk, int flags)
     tcp_clear_retrans(tp);
     tcp_delack_init(tp);
     sk->sk_send_head = NULL;
-    tp->saw_tstamp = 0;
-    tcp_sack_reset(tp);
+    tp->rx_opt.saw_tstamp = 0;
+    tcp_sack_reset(&tp->rx_opt);
     __sk_dst_reset(sk);

     BUG_TRAP(!inet->num || tp->bind_hash);
@@ -1969,7 +1969,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
             err = -EINVAL;
             break;
         }
-        tp->user_mss = val;
+        tp->rx_opt.user_mss = val;
         break;

     case TCP_NODELAY:
@@ -2119,14 +2119,14 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
     info->tcpi_probes = tp->probes_out;
     info->tcpi_backoff = tp->backoff;

-    if (tp->tstamp_ok)
+    if (tp->rx_opt.tstamp_ok)
         info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
-    if (tp->sack_ok)
+    if (tp->rx_opt.sack_ok)
         info->tcpi_options |= TCPI_OPT_SACK;
-    if (tp->wscale_ok) {
+    if (tp->rx_opt.wscale_ok) {
         info->tcpi_options |= TCPI_OPT_WSCALE;
-        info->tcpi_snd_wscale = tp->snd_wscale;
-        info->tcpi_rcv_wscale = tp->rcv_wscale;
+        info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
+        info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
     }

     if (tp->ecn_flags&TCP_ECN_OK)
@@ -2186,7 +2186,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
     case TCP_MAXSEG:
         val = tp->mss_cache_std;
         if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
-            val = tp->user_mss;
+            val = tp->rx_opt.user_mss;
         break;
     case TCP_NODELAY:
         val = !!(tp->nonagle&TCP_NAGLE_OFF);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 674002dc3c4d..8a71e824e17e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -119,9 +119,9 @@ int sysctl_tcp_bic_beta = 819;  /* = 819/1024 (BICTCP_BETA_SCALE) */
 #define FLAG_CA_ALERT           (FLAG_DATA_SACKED|FLAG_ECE)
 #define FLAG_FORWARD_PROGRESS   (FLAG_ACKED|FLAG_DATA_SACKED)

-#define IsReno(tp) ((tp)->sack_ok == 0)
-#define IsFack(tp) ((tp)->sack_ok & 2)
-#define IsDSack(tp) ((tp)->sack_ok & 4)
+#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
+#define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
+#define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)

 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)

@@ -205,7 +205,7 @@ static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp)

 static void tcp_fixup_sndbuf(struct sock *sk)
 {
-    int sndmem = tcp_sk(sk)->mss_clamp + MAX_TCP_HEADER + 16 +
+    int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
                  sizeof(struct sk_buff);

     if (sk->sk_sndbuf < 3 * sndmem)
@@ -440,10 +440,10 @@ new_measure:
 static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb)
 {
-    if (tp->rcv_tsecr &&
+    if (tp->rx_opt.rcv_tsecr &&
         (TCP_SKB_CB(skb)->end_seq -
          TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss))
-        tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_tsecr, 0);
+        tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
 }

 /*
@@ -833,7 +833,7 @@ static void tcp_init_metrics(struct sock *sk)
     }
     if (dst_metric(dst, RTAX_REORDERING) &&
         tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-        tp->sack_ok &= ~2;
+        tp->rx_opt.sack_ok &= ~2;
         tp->reordering = dst_metric(dst, RTAX_REORDERING);
     }
@@ -867,7 +867,7 @@ static void tcp_init_metrics(struct sock *sk)
     }
     tcp_set_rto(tp);
     tcp_bound_rto(tp);
-    if (tp->rto < TCP_TIMEOUT_INIT && !tp->saw_tstamp)
+    if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
         goto reset;
     tp->snd_cwnd = tcp_init_cwnd(tp, dst);
     tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -878,7 +878,7 @@ reset:
      * supported, TCP will fail to recalculate correct
      * rtt, if initial rto is too small. FORGET ALL AND RESET!
      */
-    if (!tp->saw_tstamp && tp->srtt) {
+    if (!tp->rx_opt.saw_tstamp && tp->srtt) {
         tp->srtt = 0;
         tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
         tp->rto = TCP_TIMEOUT_INIT;
@@ -901,14 +901,14 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
             NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
 #if FASTRETRANS_DEBUG > 1
         printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
-               tp->sack_ok, tp->ca_state,
+               tp->rx_opt.sack_ok, tp->ca_state,
                tp->reordering,
                tp->fackets_out,
                tp->sacked_out,
                tp->undo_marker ? tp->undo_retrans : 0);
 #endif
         /* Disable FACK yet. */
-        tp->sack_ok &= ~2;
+        tp->rx_opt.sack_ok &= ~2;
     }
 }
@@ -998,13 +998,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
         if (before(start_seq, ack)) {
             dup_sack = 1;
-            tp->sack_ok |= 4;
+            tp->rx_opt.sack_ok |= 4;
             NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
         } else if (num_sacks > 1 &&
                    !after(end_seq, ntohl(sp[1].end_seq)) &&
                    !before(start_seq, ntohl(sp[1].start_seq))) {
             dup_sack = 1;
-            tp->sack_ok |= 4;
+            tp->rx_opt.sack_ok |= 4;
             NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
         }
@@ -1629,8 +1629,8 @@ static void tcp_cwnd_down(struct tcp_sock *tp)
 static inline int tcp_packet_delayed(struct tcp_sock *tp)
 {
     return !tp->retrans_stamp ||
-        (tp->saw_tstamp && tp->rcv_tsecr &&
-         (__s32)(tp->rcv_tsecr - tp->retrans_stamp) < 0);
+        (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+         (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
 }

 /* Undo procedures. */
@@ -1978,7 +1978,7 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag)
      * answer arrives rto becomes 120 seconds! If at least one of segments
      * in window is lost... Voila.                          --ANK (010210)
      */
-    seq_rtt = tcp_time_stamp - tp->rcv_tsecr;
+    seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
     tcp_rtt_estimator(tp, seq_rtt);
     tcp_set_rto(tp);
     tp->backoff = 0;
@@ -2009,7 +2009,7 @@ static inline void tcp_ack_update_rtt(struct tcp_sock *tp, int flag, s32 seq_rtt)
 {
     /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
-    if (tp->saw_tstamp && tp->rcv_tsecr)
+    if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
         tcp_ack_saw_tstamp(tp, flag);
     else if (seq_rtt >= 0)
         tcp_ack_no_tstamp(tp, seq_rtt, flag);
@@ -2483,7 +2483,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
     BUG_TRAP((int)tp->sacked_out >= 0);
     BUG_TRAP((int)tp->lost_out >= 0);
     BUG_TRAP((int)tp->retrans_out >= 0);
-    if (!tp->packets_out && tp->sack_ok) {
+    if (!tp->packets_out && tp->rx_opt.sack_ok) {
         if (tp->lost_out) {
             printk(KERN_DEBUG "Leak l=%u %d\n",
                    tp->lost_out, tp->ca_state);
@@ -2559,7 +2559,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
     u32 nwin = ntohs(skb->h.th->window);

     if (likely(!skb->h.th->syn))
-        nwin <<= tp->snd_wscale;
+        nwin <<= tp->rx_opt.snd_wscale;

     if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
         flag |= FLAG_WIN_UPDATE;
@@ -2979,14 +2979,14 @@ uninteresting_ack:
  * But, this can also be called on packets in the established flow when
  * the fast version below fails.
  */
-void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab)
+void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
 {
     unsigned char *ptr;
     struct tcphdr *th = skb->h.th;
     int length=(th->doff*4)-sizeof(struct tcphdr);

     ptr = (unsigned char *)(th + 1);
-    tp->saw_tstamp = 0;
+    opt_rx->saw_tstamp = 0;

     while(length>0) {
         int opcode=*ptr++;
@@ -3009,41 +3009,41 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab)
                 if(opsize==TCPOLEN_MSS && th->syn && !estab) {
                     u16 in_mss = ntohs(*(__u16 *)ptr);
                     if (in_mss) {
-                        if (tp->user_mss && tp->user_mss < in_mss)
-                            in_mss = tp->user_mss;
-                        tp->mss_clamp = in_mss;
+                        if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
+                            in_mss = opt_rx->user_mss;
+                        opt_rx->mss_clamp = in_mss;
                     }
                 }
                 break;
             case TCPOPT_WINDOW:
                 if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
                     if (sysctl_tcp_window_scaling) {
-                        tp->wscale_ok = 1;
-                        tp->snd_wscale = *(__u8 *)ptr;
-                        if(tp->snd_wscale > 14) {
+                        opt_rx->wscale_ok = 1;
+                        opt_rx->snd_wscale = *(__u8 *)ptr;
+                        if(opt_rx->snd_wscale > 14) {
                             if(net_ratelimit())
                                 printk(KERN_INFO "tcp_parse_options: Illegal window "
                                        "scaling value %d >14 received.\n",
-                                       tp->snd_wscale);
-                            tp->snd_wscale = 14;
+                                       opt_rx->snd_wscale);
+                            opt_rx->snd_wscale = 14;
                         }
                     }
                 break;
             case TCPOPT_TIMESTAMP:
                 if(opsize==TCPOLEN_TIMESTAMP) {
-                    if ((estab && tp->tstamp_ok) ||
+                    if ((estab && opt_rx->tstamp_ok) ||
                         (!estab && sysctl_tcp_timestamps)) {
-                        tp->saw_tstamp = 1;
-                        tp->rcv_tsval = ntohl(*(__u32 *)ptr);
-                        tp->rcv_tsecr = ntohl(*(__u32 *)(ptr+4));
+                        opt_rx->saw_tstamp = 1;
+                        opt_rx->rcv_tsval = ntohl(*(__u32 *)ptr);
+                        opt_rx->rcv_tsecr = ntohl(*(__u32 *)(ptr+4));
                     }
                 }
                 break;
             case TCPOPT_SACK_PERM:
                 if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
                     if (sysctl_tcp_sack) {
-                        tp->sack_ok = 1;
-                        tcp_sack_reset(tp);
+                        opt_rx->sack_ok = 1;
+                        tcp_sack_reset(opt_rx);
                     }
                 }
                 break;
@@ -3051,7 +3051,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab)
             case TCPOPT_SACK:
                 if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
                    !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
-                   tp->sack_ok) {
+                   opt_rx->sack_ok) {
                     TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
                 }
         };
@@ -3068,34 +3068,34 @@ static inline int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
                                          struct tcp_sock *tp)
 {
     if (th->doff == sizeof(struct tcphdr)>>2) {
-        tp->saw_tstamp = 0;
+        tp->rx_opt.saw_tstamp = 0;
         return 0;
-    } else if (tp->tstamp_ok &&
+    } else if (tp->rx_opt.tstamp_ok &&
                th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
         __u32 *ptr = (__u32 *)(th + 1);
         if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
                           | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
-            tp->saw_tstamp = 1;
+            tp->rx_opt.saw_tstamp = 1;
             ++ptr;
-            tp->rcv_tsval = ntohl(*ptr);
+            tp->rx_opt.rcv_tsval = ntohl(*ptr);
             ++ptr;
-            tp->rcv_tsecr = ntohl(*ptr);
+            tp->rx_opt.rcv_tsecr = ntohl(*ptr);
             return 1;
         }
     }
-    tcp_parse_options(skb, tp, 1);
+    tcp_parse_options(skb, &tp->rx_opt, 1);
     return 1;
 }

 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
-    tp->ts_recent = tp->rcv_tsval;
-    tp->ts_recent_stamp = xtime.tv_sec;
+    tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+    tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
 }

 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 {
-    if (tp->saw_tstamp && !after(seq, tp->rcv_wup)) {
+    if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
         /* PAWS bug workaround wrt. ACK frames, the PAWS discard
          * extra check below makes sure this can only happen
          * for pure ACK frames.  -DaveM
@@ -3103,8 +3103,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
          * Not only, also it occurs for expired timestamps.
          */
-        if((s32)(tp->rcv_tsval - tp->ts_recent) >= 0 ||
-           xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
+        if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
+           xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
             tcp_store_ts_recent(tp);
     }
 }
@@ -3145,16 +3145,16 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
         ack == tp->snd_una &&

         /* 3. ... and does not update window. */
-        !tcp_may_update_window(tp, ack, seq, ntohs(th->window)<<tp->snd_wscale) &&
+        !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&

         /* 4. ... and sits in replay window. */
-        (s32)(tp->ts_recent - tp->rcv_tsval) <= (tp->rto*1024)/HZ);
+        (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ);
 }

 static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb)
 {
-    return ((s32)(tp->ts_recent - tp->rcv_tsval) > TCP_PAWS_WINDOW &&
-            xtime.tv_sec < tp->ts_recent_stamp + TCP_PAWS_24DAYS &&
+    return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
+            xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
             !tcp_disordered_ack(tp, skb));
 }
@@ -3267,8 +3267,8 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
      * Probably, we should reset in this case. For now drop them.
      */
     __skb_queue_purge(&tp->out_of_order_queue);
-    if (tp->sack_ok)
-        tcp_sack_reset(tp);
+    if (tp->rx_opt.sack_ok)
+        tcp_sack_reset(&tp->rx_opt);
     sk_stream_mem_reclaim(sk);

     if (!sock_flag(sk, SOCK_DEAD)) {
@@ -3298,22 +3298,22 @@ tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)

 static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
-    if (tp->sack_ok && sysctl_tcp_dsack) {
+    if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
         if (before(seq, tp->rcv_nxt))
             NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
         else
             NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);

-        tp->dsack = 1;
+        tp->rx_opt.dsack = 1;
         tp->duplicate_sack[0].start_seq = seq;
         tp->duplicate_sack[0].end_seq = end_seq;
-        tp->eff_sacks = min(tp->num_sacks+1, 4-tp->tstamp_ok);
+        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
     }
 }

 static inline void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
-    if (!tp->dsack)
+    if (!tp->rx_opt.dsack)
         tcp_dsack_set(tp, seq, end_seq);
     else
         tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
@@ -3328,7 +3328,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
         NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
         tcp_enter_quickack_mode(tp);

-        if (tp->sack_ok && sysctl_tcp_dsack) {
+        if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
             u32 end_seq = TCP_SKB_CB(skb)->end_seq;

             if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -3352,16 +3352,16 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
     /* See if the recent change to the first SACK eats into
      * or hits the sequence space of other SACK blocks, if so coalesce.
      */
-    for (this_sack = 1; this_sack < tp->num_sacks; ) {
+    for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
         if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
             int i;

             /* Zap SWALK, by moving every further SACK up by one slot.
              * Decrease num_sacks.
              */
-            tp->num_sacks--;
-            tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
-            for(i=this_sack; i < tp->num_sacks; i++)
+            tp->rx_opt.num_sacks--;
+            tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
+            for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
                 sp[i] = sp[i+1];
             continue;
         }
@@ -3386,7 +3386,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct tcp_sack_block *sp = &tp->selective_acks[0];
-    int cur_sacks = tp->num_sacks;
+    int cur_sacks = tp->rx_opt.num_sacks;
     int this_sack;

     if (!cur_sacks)
@@ -3411,7 +3411,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
      */
     if (this_sack >= 4) {
         this_sack--;
-        tp->num_sacks--;
+        tp->rx_opt.num_sacks--;
         sp--;
     }
     for(; this_sack > 0; this_sack--, sp--)
@@ -3421,8 +3421,8 @@ new_sack:
     /* Build the new head SACK, and we're done. */
     sp->start_seq = seq;
     sp->end_seq = end_seq;
-    tp->num_sacks++;
-    tp->eff_sacks = min(tp->num_sacks + tp->dsack, 4 - tp->tstamp_ok);
+    tp->rx_opt.num_sacks++;
+    tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
 }

 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -3430,13 +3430,13 @@ new_sack:
 static void tcp_sack_remove(struct tcp_sock *tp)
 {
     struct tcp_sack_block *sp = &tp->selective_acks[0];
-    int num_sacks = tp->num_sacks;
+    int num_sacks = tp->rx_opt.num_sacks;
     int this_sack;

     /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
     if (skb_queue_len(&tp->out_of_order_queue) == 0) {
-        tp->num_sacks = 0;
-        tp->eff_sacks = tp->dsack;
+        tp->rx_opt.num_sacks = 0;
+        tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
         return;
     }
@@ -3457,9 +3457,9 @@ static void tcp_sack_remove(struct tcp_sock *tp)
         this_sack++;
         sp++;
     }
-    if (num_sacks != tp->num_sacks) {
-        tp->num_sacks = num_sacks;
-        tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+    if (num_sacks != tp->rx_opt.num_sacks) {
+        tp->rx_opt.num_sacks = num_sacks;
+        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
     }
 }
@@ -3517,10 +3517,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)

         TCP_ECN_accept_cwr(tp, skb);

-        if (tp->dsack) {
-            tp->dsack = 0;
-            tp->eff_sacks = min_t(unsigned int, tp->num_sacks,
-                                  4 - tp->tstamp_ok);
+        if (tp->rx_opt.dsack) {
+            tp->rx_opt.dsack = 0;
+            tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
+                                         4 - tp->rx_opt.tstamp_ok);
         }

         /* Queue data for delivery to the user.
@@ -3578,7 +3578,7 @@ queue_and_out:
             tp->ack.pingpong = 0;
         }

-        if (tp->num_sacks)
+        if (tp->rx_opt.num_sacks)
             tcp_sack_remove(tp);

         tcp_fast_path_check(sk, tp);
@@ -3645,10 +3645,10 @@ drop:
     if (!skb_peek(&tp->out_of_order_queue)) {
         /* Initial out of order segment, build 1 SACK. */
-        if (tp->sack_ok) {
-            tp->num_sacks = 1;
-            tp->dsack = 0;
-            tp->eff_sacks = 1;
+        if (tp->rx_opt.sack_ok) {
+            tp->rx_opt.num_sacks = 1;
+            tp->rx_opt.dsack = 0;
+            tp->rx_opt.eff_sacks = 1;
             tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
             tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -3662,7 +3662,7 @@ drop:

         if (seq == TCP_SKB_CB(skb1)->end_seq) {
             __skb_append(skb1, skb);

-            if (!tp->num_sacks ||
+            if (!tp->rx_opt.num_sacks ||
                 tp->selective_acks[0].end_seq != seq)
                 goto add_sack;
@@ -3710,7 +3710,7 @@ drop:
     }

 add_sack:
-    if (tp->sack_ok)
+    if (tp->rx_opt.sack_ok)
         tcp_sack_new_ofo_skb(sk, seq, end_seq);
     }
 }
@@ -3892,8 +3892,8 @@ static int tcp_prune_queue(struct sock *sk)
          * is in a sad state like this, we care only about integrity
          * of the connection not performance.
          */
-        if (tp->sack_ok)
-            tcp_sack_reset(tp);
+        if (tp->rx_opt.sack_ok)
+            tcp_sack_reset(&tp->rx_opt);
         sk_stream_mem_reclaim(sk);
     }
@@ -3948,7 +3948,7 @@ static void tcp_new_space(struct sock *sk)
         !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
         !tcp_memory_pressure &&
         atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
-        int sndmem = max_t(u32, tp->mss_clamp, tp->mss_cache_std) +
+        int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache_std) +
                      MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
             demanded = max_t(unsigned int, tp->snd_cwnd,
                              tp->reordering + 1);
@@ -4215,7 +4215,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
      *  We do checksum and copy also but from device to kernel.
      */

-    tp->saw_tstamp = 0;
+    tp->rx_opt.saw_tstamp = 0;

     /*  pred_flags is 0xS?10 << 16 + snd_wnd
      *  if header_predition is to be made
@@ -4244,14 +4244,14 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                               | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
                 goto slow_path;

-            tp->saw_tstamp = 1;
+            tp->rx_opt.saw_tstamp = 1;
             ++ptr;
-            tp->rcv_tsval = ntohl(*ptr);
+            tp->rx_opt.rcv_tsval = ntohl(*ptr);
             ++ptr;
-            tp->rcv_tsecr = ntohl(*ptr);
+            tp->rx_opt.rcv_tsecr = ntohl(*ptr);

             /* If PAWS failed, check it more carefully in slow path */
-            if ((s32)(tp->rcv_tsval - tp->ts_recent) < 0)
+            if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
                 goto slow_path;

             /* DO NOT update ts_recent here, if checksum fails
@@ -4377,7 +4377,7 @@ slow_path:
     /*
      *  RFC1323: H1. Apply PAWS check first.
      */
-    if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
+    if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
         tcp_paws_discard(tp, skb)) {
         if (!th->rst) {
             NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
@@ -4449,9 +4449,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                          struct tcphdr *th, unsigned len)
 {
     struct tcp_sock *tp = tcp_sk(sk);
-    int saved_clamp = tp->mss_clamp;
+    int saved_clamp = tp->rx_opt.mss_clamp;

-    tcp_parse_options(skb, tp, 0);
+    tcp_parse_options(skb, &tp->rx_opt, 0);

     if (th->ack) {
         /* rfc793:
@@ -4468,8 +4468,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
         if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
             goto reset_and_undo;

-        if (tp->saw_tstamp && tp->rcv_tsecr &&
-            !between(tp->rcv_tsecr, tp->retrans_stamp,
+        if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+            !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
                      tcp_time_stamp)) {
             NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
             goto reset_and_undo;
@@ -4524,13 +4524,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
         tp->snd_wnd = ntohs(th->window);
         tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);

-        if (!tp->wscale_ok) {
-            tp->snd_wscale = tp->rcv_wscale = 0;
+        if (!tp->rx_opt.wscale_ok) {
+            tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
             tp->window_clamp = min(tp->window_clamp, 65535U);
         }

-        if (tp->saw_tstamp) {
-            tp->tstamp_ok = 1;
+        if (tp->rx_opt.saw_tstamp) {
+            tp->rx_opt.tstamp_ok = 1;
             tp->tcp_header_len =
                 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
             tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -4539,8 +4539,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
             tp->tcp_header_len = sizeof(struct tcphdr);
         }

-        if (tp->sack_ok && sysctl_tcp_fack)
-            tp->sack_ok |= 2;
+        if (tp->rx_opt.sack_ok && sysctl_tcp_fack)
+            tp->rx_opt.sack_ok |= 2;

         tcp_sync_mss(sk, tp->pmtu_cookie);
         tcp_initialize_rcv_mss(sk);
@@ -4567,7 +4567,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
         if (sock_flag(sk, SOCK_KEEPOPEN))
             tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));

-        if (!tp->snd_wscale)
+        if (!tp->rx_opt.snd_wscale)
             __tcp_fast_path_on(tp, tp->snd_wnd);
         else
             tp->pred_flags = 0;
@@ -4614,7 +4614,7 @@ discard:
     }

     /* PAWS check. */
-    if (tp->ts_recent_stamp && tp->saw_tstamp && tcp_paws_check(tp, 0))
+    if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0))
         goto discard_and_undo;

     if (th->syn) {
@@ -4624,8 +4624,8 @@ discard:
          */
         tcp_set_state(sk, TCP_SYN_RECV);

-        if (tp->saw_tstamp) {
-            tp->tstamp_ok = 1;
+        if (tp->rx_opt.saw_tstamp) {
+            tp->rx_opt.tstamp_ok = 1;
             tcp_store_ts_recent(tp);
             tp->tcp_header_len =
                 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
@@ -4672,13 +4672,13 @@ discard:
      */

 discard_and_undo:
-    tcp_clear_options(tp);
-    tp->mss_clamp = saved_clamp;
+    tcp_clear_options(&tp->rx_opt);
+    tp->rx_opt.mss_clamp = saved_clamp;
     goto discard;

 reset_and_undo:
-    tcp_clear_options(tp);
-    tp->mss_clamp = saved_clamp;
+    tcp_clear_options(&tp->rx_opt);
+    tp->rx_opt.mss_clamp = saved_clamp;
     return 1;
 }
@@ -4696,7 +4696,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
     struct tcp_sock *tp = tcp_sk(sk);
     int queued = 0;

-    tp->saw_tstamp = 0;
+    tp->rx_opt.saw_tstamp = 0;

     switch (sk->sk_state) {
     case TCP_CLOSE:
@@ -4751,7 +4751,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
         return 0;
     }

-    if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
+    if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
         tcp_paws_discard(tp, skb)) {
         if (!th->rst) {
             NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
@@ -4811,7 +4811,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                 tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                 tp->snd_wnd = ntohs(th->window) <<
-                              tp->snd_wscale;
+                              tp->rx_opt.snd_wscale;
                 tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
                             TCP_SKB_CB(skb)->seq);
@@ -4819,11 +4819,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                  * and does not calculate rtt.
                  * Fix it at least with timestamps.
                  */
-                if (tp->saw_tstamp && tp->rcv_tsecr &&
+                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                     !tp->srtt)
                     tcp_ack_saw_tstamp(tp, 0);

-                if (tp->tstamp_ok)
+                if (tp->rx_opt.tstamp_ok)
                     tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;

                 /* Make sure socket is routed, for
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e0a8ff2d7d69..37d4ef7b58fa 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -591,8 +591,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
             if ((tp->write_seq = tw->tw_snd_nxt + 65535 + 2) == 0)
                 tp->write_seq = 1;
-            tp->ts_recent = tw->tw_ts_recent;
-            tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
+            tp->rx_opt.ts_recent = tw->tw_ts_recent;
+            tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
             sock_hold(sk2);
             goto unique;
         } else
@@ -783,25 +783,25 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     inet->saddr = rt->rt_src;
     inet->rcv_saddr = inet->saddr;

-    if (tp->ts_recent_stamp && inet->daddr != daddr) {
+    if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
         /* Reset inherited state */
-        tp->ts_recent = 0;
-        tp->ts_recent_stamp = 0;
-        tp->write_seq = 0;
+        tp->rx_opt.ts_recent = 0;
+        tp->rx_opt.ts_recent_stamp = 0;
+        tp->write_seq = 0;
     }

     if (sysctl_tcp_tw_recycle &&
-        !tp->ts_recent_stamp && rt->rt_dst == daddr) {
+        !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
         struct inet_peer *peer = rt_get_peer(rt);

         /* VJ's idea. We save last timestamp seen from
          * the destination in peer table, when entering state TIME-WAIT
-         * and initialize ts_recent from it, when trying new connection.
+         * and initialize rx_opt.ts_recent from it, when trying new connection.
          */

         if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
-            tp->ts_recent_stamp = peer->tcp_ts_stamp;
-            tp->ts_recent = peer->tcp_ts;
+            tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
+            tp->rx_opt.ts_recent = peer->tcp_ts;
         }
     }
@@ -812,7 +812,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     if (inet->opt)
         tp->ext_header_len = inet->opt->optlen;

-    tp->mss_clamp = 536;
+    tp->rx_opt.mss_clamp = 536;

     /* Socket identity is still unknown (sport may be zero).
      * However we set state to SYN-SENT and not releasing socket
@@ -1393,7 +1393,7 @@ struct or_calltable or_ipv4 = {

 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-    struct tcp_sock tp;
+    struct tcp_options_received tmp_opt;
     struct open_request *req;
     __u32 saddr = skb->nh.iph->saddr;
     __u32 daddr = skb->nh.iph->daddr;
@@ -1435,29 +1435,29 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
     if (!req)
         goto drop;

-    tcp_clear_options(&tp);
-    tp.mss_clamp = 536;
-    tp.user_mss  = tcp_sk(sk)->user_mss;
+    tcp_clear_options(&tmp_opt);
+    tmp_opt.mss_clamp = 536;
+    tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

-    tcp_parse_options(skb, &tp, 0);
+    tcp_parse_options(skb, &tmp_opt, 0);

     if (want_cookie) {
-        tcp_clear_options(&tp);
-        tp.saw_tstamp = 0;
+        tcp_clear_options(&tmp_opt);
+        tmp_opt.saw_tstamp = 0;
     }

-    if (tp.saw_tstamp && !tp.rcv_tsval) {
+    if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
         /* Some OSes (unknown ones, but I see them on web server, which
          * contains information interesting only for windows'
          * users) do not send their stamp in SYN. It is easy case.
          * We simply do not advertise TS support.
          */
-        tp.saw_tstamp = 0;
-        tp.tstamp_ok  = 0;
+        tmp_opt.saw_tstamp = 0;
+        tmp_opt.tstamp_ok  = 0;
     }
-    tp.tstamp_ok = tp.saw_tstamp;
+    tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

-    tcp_openreq_init(req, &tp, skb);
+    tcp_openreq_init(req, &tmp_opt, skb);

     req->af.v4_req.loc_addr = daddr;
     req->af.v4_req.rmt_addr = saddr;
@@ -1483,7 +1483,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
          * timewait bucket, so that all the necessary checks
          * are made in the function processing timewait state.
          */
-        if (tp.saw_tstamp &&
+        if (tmp_opt.saw_tstamp &&
             sysctl_tcp_tw_recycle &&
             (dst = tcp_v4_route_req(sk, req)) != NULL &&
             (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
@@ -1987,11 +1987,11 @@ int tcp_v4_remember_stamp(struct sock *sk)
     }

     if (peer) {
-        if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
+        if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
             (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
-             peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
-            peer->tcp_ts_stamp = tp->ts_recent_stamp;
-            peer->tcp_ts = tp->ts_recent;
+             peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
+            peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
+            peer->tcp_ts = tp->rx_opt.ts_recent;
         }
         if (release_it)
             inet_putpeer(peer);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 2876f505674d..1e2265c086ba 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -125,17 +125,17 @@ enum tcp_tw_status
 tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
                            struct tcphdr *th, unsigned len)
 {
-    struct tcp_sock tp;
+    struct tcp_options_received tmp_opt;
     int paws_reject = 0;

-    tp.saw_tstamp = 0;
+    tmp_opt.saw_tstamp = 0;
     if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
-        tcp_parse_options(skb, &tp, 0);
+        tcp_parse_options(skb, &tmp_opt, 0);

-        if (tp.saw_tstamp) {
-            tp.ts_recent = tw->tw_ts_recent;
-            tp.ts_recent_stamp = tw->tw_ts_recent_stamp;
-            paws_reject = tcp_paws_check(&tp, th->rst);
+        if (tmp_opt.saw_tstamp) {
+            tmp_opt.ts_recent = tw->tw_ts_recent;
+            tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
+            paws_reject = tcp_paws_check(&tmp_opt, th->rst);
         }
     }
@@ -176,9 +176,9 @@ kill_with_rst:
         /* FIN arrived, enter true time-wait state. */
         tw->tw_substate = TCP_TIME_WAIT;
         tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-        if (tp.saw_tstamp) {
+        if (tmp_opt.saw_tstamp) {
             tw->tw_ts_recent_stamp = xtime.tv_sec;
-            tw->tw_ts_recent = tp.rcv_tsval;
+            tw->tw_ts_recent = tmp_opt.rcv_tsval;
         }

         /* I am shamed, but failed to make it more elegant.
@@ -231,8 +231,8 @@ kill:
         }
         tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

-        if (tp.saw_tstamp) {
-            tw->tw_ts_recent = tp.rcv_tsval;
+        if (tmp_opt.saw_tstamp) {
+            tw->tw_ts_recent = tmp_opt.rcv_tsval;
             tw->tw_ts_recent_stamp = xtime.tv_sec;
         }

@@ -259,7 +259,7 @@ kill:
     if (th->syn && !th->rst && !th->ack && !paws_reject &&
         (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
-         (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) {
+         (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
         u32 isn = tw->tw_snd_nxt + 65535 + 2;
         if (isn == 0)
             isn++;
@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
     struct tcp_sock *tp = tcp_sk(sk);
     int recycle_ok = 0;

-    if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
+    if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
         recycle_ok = tp->af_specific->remember_stamp(sk);

     if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
@@ -353,15 +353,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
         tw->tw_dport            = inet->dport;
         tw->tw_family           = sk->sk_family;
         tw->tw_reuse            = sk->sk_reuse;
-        tw->tw_rcv_wscale       = tp->rcv_wscale;
+        tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
         atomic_set(&tw->tw_refcnt, 1);

         tw->tw_hashent          = sk->sk_hashent;
         tw->tw_rcv_nxt          = tp->rcv_nxt;
         tw->tw_snd_nxt          = tp->snd_nxt;
         tw->tw_rcv_wnd          = tcp_receive_window(tp);
-        tw->tw_ts_recent        = tp->ts_recent;
-        tw->tw_ts_recent_stamp  = tp->ts_recent_stamp;
+        tw->tw_ts_recent        = tp->rx_opt.ts_recent;
+        tw->tw_ts_recent_stamp  = tp->rx_opt.ts_recent_stamp;
         tw_dead_node_init(tw);

 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -780,13 +780,13 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
         newtp->pushed_seq = newtp->write_seq;
         newtp->copied_seq = req->rcv_isn + 1;

-        newtp->saw_tstamp = 0;
+        newtp->rx_opt.saw_tstamp = 0;

-        newtp->dsack = 0;
-        newtp->eff_sacks = 0;
+        newtp->rx_opt.dsack = 0;
+        newtp->rx_opt.eff_sacks = 0;

         newtp->probes_out = 0;
-        newtp->num_sacks = 0;
+        newtp->rx_opt.num_sacks = 0;
         newtp->urg_data = 0;
         newtp->listen_opt = NULL;
         newtp->accept_queue = newtp->accept_queue_tail = NULL;
@@ -809,36 +809,36 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
         newsk->sk_sleep = NULL;
         newsk->sk_owner = NULL;

-        newtp->tstamp_ok = req->tstamp_ok;
-        if((newtp->sack_ok = req->sack_ok) != 0) {
+        newtp->rx_opt.tstamp_ok = req->tstamp_ok;
+        if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
             if (sysctl_tcp_fack)
-                newtp->sack_ok |= 2;
+                newtp->rx_opt.sack_ok |= 2;
         }
         newtp->window_clamp = req->window_clamp;
         newtp->rcv_ssthresh = req->rcv_wnd;
         newtp->rcv_wnd = req->rcv_wnd;
-        newtp->wscale_ok = req->wscale_ok;
-        if (newtp->wscale_ok) {
-            newtp->snd_wscale = req->snd_wscale;
-            newtp->rcv_wscale = req->rcv_wscale;
+        newtp->rx_opt.wscale_ok = req->wscale_ok;
+        if (newtp->rx_opt.wscale_ok) {
+            newtp->rx_opt.snd_wscale = req->snd_wscale;
+            newtp->rx_opt.rcv_wscale = req->rcv_wscale;
         } else {
-            newtp->snd_wscale = newtp->rcv_wscale = 0;
+            newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
             newtp->window_clamp = min(newtp->window_clamp, 65535U);
         }
-        newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
+        newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
         newtp->max_window = newtp->snd_wnd;

-        if (newtp->tstamp_ok) {
-            newtp->ts_recent = req->ts_recent;
-            newtp->ts_recent_stamp = xtime.tv_sec;
+        if (newtp->rx_opt.tstamp_ok) {
+            newtp->rx_opt.ts_recent = req->ts_recent;
+            newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
             newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
         } else {
-            newtp->ts_recent_stamp = 0;
+            newtp->rx_opt.ts_recent_stamp = 0;
             newtp->tcp_header_len = sizeof(struct tcphdr);
         }
         if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
             newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
-        newtp->mss_clamp = req->mss;
+        newtp->rx_opt.mss_clamp = req->mss;
         TCP_ECN_openreq_child(newtp, req);
         if (newtp->ecn_flags&TCP_ECN_OK)
             newsk->sk_no_largesend = 1;
@@ -863,21 +863,21 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
     struct tcp_sock *tp = tcp_sk(sk);
     u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
     int paws_reject = 0;
-    struct tcp_sock ttp;
+    struct tcp_options_received tmp_opt;
     struct sock *child;

-    ttp.saw_tstamp = 0;
+    tmp_opt.saw_tstamp = 0;
     if (th->doff > (sizeof(struct tcphdr)>>2)) {
-        tcp_parse_options(skb, &ttp, 0);
+        tcp_parse_options(skb, &tmp_opt, 0);

-        if (ttp.saw_tstamp) {
-            ttp.ts_recent = req->ts_recent;
+        if (tmp_opt.saw_tstamp) {
+            tmp_opt.ts_recent = req->ts_recent;
             /* We do not store true stamp, but it is not required,
              * it can be estimated (approximately)
              * from another data.
              */
-            ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
-            paws_reject = tcp_paws_check(&ttp, th->rst);
+            tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
+            paws_reject = tcp_paws_check(&tmp_opt, th->rst);
         }
     }
@@ -982,63 +982,63 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,

     /* In sequence, PAWS is OK. */
-    if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
-        req->ts_recent = ttp.rcv_tsval;
+    if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
+        req->ts_recent = tmp_opt.rcv_tsval;

-    if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
-        /* Truncate SYN, it is out of window starting
-           at req->rcv_isn+1. */
-        flg &= ~TCP_FLAG_SYN;
-    }
+        if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
+            /* Truncate SYN, it is out of window starting
+               at req->rcv_isn+1. */
+            flg &= ~TCP_FLAG_SYN;
+        }

-    /* RFC793: "second check the RST bit" and
-     *         "fourth, check the SYN bit"
-     */
-    if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
-        goto embryonic_reset;
+        /* RFC793: "second check the RST bit" and
+         *         "fourth, check the SYN bit"
+         */
+        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
+            goto embryonic_reset;

-    /* ACK sequence verified above, just make sure ACK is
-     * set. If ACK not set, just silently drop the packet.
-     */
-    if (!(flg & TCP_FLAG_ACK))
-        return NULL;
+        /* ACK sequence verified above, just make sure ACK is
+         * set. If ACK not set, just silently drop the packet.
+         */
+        if (!(flg & TCP_FLAG_ACK))
+            return NULL;

-    /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-    if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
-        req->acked = 1;
-        return NULL;
-    }
+        /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
+        if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
+            req->acked = 1;
+            return NULL;
+        }

-    /* OK, ACK is valid, create big socket and
-     * feed this segment to it. It will repeat all
-     * the tests. THIS SEGMENT MUST MOVE SOCKET TO
-     * ESTABLISHED STATE. If it will be dropped after
-     * socket is created, wait for troubles.
-     */
-    child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
-    if (child == NULL)
-        goto listen_overflow;
+        /* OK, ACK is valid, create big socket and
+         * feed this segment to it. It will repeat all
+         * the tests. THIS SEGMENT MUST MOVE SOCKET TO
+         * ESTABLISHED STATE. If it will be dropped after
+         * socket is created, wait for troubles.
+         */
+        child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
+        if (child == NULL)
+            goto listen_overflow;

-    sk_set_owner(child, sk->sk_owner);
-    tcp_synq_unlink(tp, req, prev);
-    tcp_synq_removed(sk, req);
+        sk_set_owner(child, sk->sk_owner);
+        tcp_synq_unlink(tp, req, prev);
+        tcp_synq_removed(sk, req);

-    tcp_acceptq_queue(sk, req, child);
-    return child;
+        tcp_acceptq_queue(sk, req, child);
+        return child;

-listen_overflow:
-    if (!sysctl_tcp_abort_on_overflow) {
-        req->acked = 1;
-        return NULL;
-    }
+    listen_overflow:
+        if (!sysctl_tcp_abort_on_overflow) {
+            req->acked = 1;
+            return NULL;
+        }

-embryonic_reset:
-    NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
-    if (!(flg & TCP_FLAG_RST))
-        req->class->send_reset(skb);
+    embryonic_reset:
+        NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
+        if (!(flg & TCP_FLAG_RST))
+            req->class->send_reset(skb);

-    tcp_synq_drop(sk, req, prev);
-    return NULL;
+        tcp_synq_drop(sk, req, prev);
+        return NULL;
 }

 /*
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 56938a6f76ac..a29ac2763600 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -236,13 +236,13 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
     /* Make sure we do not exceed the maximum possible
      * scaled window.
      */
-    if (!tp->rcv_wscale)
+    if (!tp->rx_opt.rcv_wscale)
         new_win = min(new_win, MAX_TCP_WINDOW);
     else
-        new_win = min(new_win, (65535U << tp->rcv_wscale));
+        new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

     /* RFC1323 scaling applied */
-    new_win >>= tp->rcv_wscale;
+    new_win >>= tp->rx_opt.rcv_wscale;

     /* If we advertise zero window, disable fast path. */
     if (new_win == 0)
@@ -296,12 +296,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
             if(!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
                 tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
         }
-    } else if (tp->eff_sacks) {
+    } else if (tp->rx_opt.eff_sacks) {
         /* A SACK is 2 pad bytes, a 2 byte header, plus
          * 2 32-bit sequence numbers for each SACK block.
          */
         tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
-                            (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
+                            (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
     }

     /*
@@ -349,9 +349,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
                                       (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
                                       (sysctl_flags & SYSCTL_FLAG_SACK),
                                       (sysctl_flags & SYSCTL_FLAG_WSCALE),
-                                      tp->rcv_wscale,
+                                      tp->rx_opt.rcv_wscale,
                                       tcb->when,
-                                      tp->ts_recent);
+                                      tp->rx_opt.ts_recent);
         } else {
             tcp_build_and_update_options((__u32 *)(th + 1),
                                          tp, tcb->when);
@@ -607,10 +607,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)

 /* This function synchronize snd mss to current pmtu/exthdr set.

-   tp->user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
+   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
    for TCP options, but includes only bare TCP header.

-   tp->mss_clamp is mss negotiated at connection setup.
+   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
    It is minumum of user_mss and mss received with SYN.
    It also does not include TCP options.

@@ -619,7 +619,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
    tp->mss_cache is current effective sending mss, including
    all tcp options except for SACKs. It is evaluated,
    taking into account current pmtu, but never exceeds
-   tp->mss_clamp.
+   tp->rx_opt.mss_clamp.

    NOTE1. rfc1122 clearly states that advertised MSS
    DOES NOT include either tcp or ip options.
@@ -643,8 +643,8 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
     mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr);

     /* Clamp it (mss_clamp does not include tcp options) */
-    if (mss_now > tp->mss_clamp)
-        mss_now = tp->mss_clamp;
+    if (mss_now > tp->rx_opt.mss_clamp)
+        mss_now = tp->rx_opt.mss_clamp;

     /* Now subtract optional transport overhead */
     mss_now -= tp->ext_header_len + tp->ext2_header_len;
@@ -723,9 +723,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large)
         mss_now = tp->mss_cache;
     }

-    if (tp->eff_sacks)
+    if (tp->rx_opt.eff_sacks)
         mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
-                    (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
+                    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
     return mss_now;
 }
@@ -875,16 +875,16 @@ u32 __tcp_select_window(struct sock *sk)
      * scaled window will not line up with the MSS boundary anyway.
      */
     window = tp->rcv_wnd;
-    if (tp->rcv_wscale) {
+    if (tp->rx_opt.rcv_wscale) {
         window = free_space;

         /* Advertise enough space so that it won't get scaled away.
          * Import case: prevent zero window announcement if
          * 1<<rcv_wscale > mss.
          */
-        if (((window >> tp->rcv_wscale) << tp->rcv_wscale) != window)
-            window = (((window >> tp->rcv_wscale) + 1)
-                      << tp->rcv_wscale);
+        if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
+            window = (((window >> tp->rx_opt.rcv_wscale) + 1)
+                      << tp->rx_opt.rcv_wscale);
     } else {
         /* Get the largest window that is a nice multiple of mss.
          * Window clamp already applied above.
@@ -962,7 +962,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
         tp->left_out -= tcp_skb_pcount(next_skb);
     }
     /* Reno case is special. Sigh... */
-    if (!tp->sack_ok && tp->sacked_out) {
+    if (!tp->rx_opt.sack_ok && tp->sacked_out) {
         tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
         tp->left_out -= tcp_skb_pcount(next_skb);
     }
@@ -1200,7 +1200,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
         return;

     /* No forward retransmissions in Reno are possible. */
-    if (!tp->sack_ok)
+    if (!tp->rx_opt.sack_ok)
         return;

     /* Yeah, we have to make difficult choice between forward transmission
@@ -1439,8 +1439,8 @@ static inline void tcp_connect_init(struct sock *sk)
         (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

     /* If user gave his TCP_MAXSEG, record it to clamp */
-    if (tp->user_mss)
-        tp->mss_clamp = tp->user_mss;
+    if (tp->rx_opt.user_mss)
+        tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
     tp->max_window = 0;
     tcp_sync_mss(sk, dst_pmtu(dst));

@@ -1451,11 +1451,11 @@ static inline void tcp_connect_init(struct sock *sk)
     tcp_ca_init(tp);

     tcp_select_initial_window(tcp_full_space(sk),
-                              tp->advmss - (tp->ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
+                              tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
                               &tp->rcv_wnd,
                               &tp->window_clamp,
                               sysctl_tcp_window_scaling,
-                              &tp->rcv_wscale);
+                              &tp->rx_opt.rcv_wscale);

     tp->rcv_ssthresh = tp->rcv_wnd;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c3751508ed24..84844eeeb654 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -353,7 +353,7 @@ static void tcp_retransmit_timer(struct sock *sk)
     if (tp->retransmits == 0) {
         if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
-            if (tp->sack_ok) {
+            if (tp->rx_opt.sack_ok) {
                 if (tp->ca_state == TCP_CA_Recovery)
                     NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
                 else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3bc7ca44a60b..516775894665 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -473,8 +473,8 @@ static int tcp_v6_check_established(struct sock *sk)
             tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
             if (!tp->write_seq)
                 tp->write_seq = 1;
-            tp->ts_recent = tw->tw_ts_recent;
-            tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
+            tp->rx_opt.ts_recent = tw->tw_ts_recent;
+            tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
             sock_hold(sk2);
             goto unique;
         } else
@@ -609,10 +609,10 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
             return -EINVAL;
     }

-    if (tp->ts_recent_stamp &&
+    if (tp->rx_opt.ts_recent_stamp &&
         !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
-        tp->ts_recent = 0;
-        tp->ts_recent_stamp = 0;
+        tp->rx_opt.ts_recent = 0;
+        tp->rx_opt.ts_recent_stamp = 0;
         tp->write_seq = 0;
     }
@@ -703,7 +703,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
         tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
     tp->ext2_header_len = dst->header_len;

-    tp->mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
+    tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

     inet->dport = usin->sin6_port;
@@ -1202,7 +1202,8 @@ static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
     struct ipv6_pinfo *np = inet6_sk(sk);
-    struct tcp_sock tmptp, *tp = tcp_sk(sk);
+    struct tcp_options_received tmp_opt;
+    struct tcp_sock *tp = tcp_sk(sk);
     struct open_request *req = NULL;
     __u32 isn = TCP_SKB_CB(skb)->when;
@@ -1228,14 +1229,14 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
     if (req == NULL)
         goto drop;

-    tcp_clear_options(&tmptp);
-    tmptp.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
-    tmptp.user_mss = tp->user_mss;
+    tcp_clear_options(&tmp_opt);
+    tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
+    tmp_opt.user_mss = tp->rx_opt.user_mss;

-    tcp_parse_options(skb, &tmptp, 0);
+    tcp_parse_options(skb, &tmp_opt, 0);

-    tmptp.tstamp_ok = tmptp.saw_tstamp;
-    tcp_openreq_init(req, &tmptp, skb);
+    tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+    tcp_openreq_init(req, &tmp_opt, skb);

     req->class = &or_ipv6;
     ipv6_addr_copy(&req->af.v6_req.rmt_addr, &skb->nh.ipv6h->saddr);
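The recurring pattern in the TCP hunks above is a single refactor: every field that tcp_parse_options() fills in moves out of struct tcp_sock into the new struct tcp_options_received, reachable as tp->rx_opt, so that SYN handling can parse options into a small on-stack struct instead of abusing a throwaway struct tcp_sock (as the old tcp_v4_conn_request() did). The sketch below is a minimal userspace illustration of that pattern, not kernel code; the struct is condensed from the include/linux/tcp.h hunk, and parse_syn_options() is a hypothetical stand-in for the real parser.

```c
#include <stdio.h>
#include <string.h>

/* Condensed from the include/linux/tcp.h hunk: the option state that
 * tcp_parse_options() fills in now lives in one small struct. */
struct tcp_options_received {
    long ts_recent_stamp;       /* Time we stored ts_recent (for aging) */
    unsigned int ts_recent;     /* Time stamp to echo next */
    unsigned int rcv_tsval;     /* Time stamp value */
    unsigned int rcv_tsecr;     /* Time stamp echo reply */
    char saw_tstamp;            /* Saw TIMESTAMP on last packet */
    char tstamp_ok;             /* TIMESTAMP seen on SYN packet */
    char sack_ok;               /* SACK seen on SYN packet */
    char wscale_ok;             /* Wscale seen on SYN packet */
    unsigned char snd_wscale;   /* Window scaling received from sender */
    unsigned char rcv_wscale;   /* Window scaling to send to receiver */
    unsigned short user_mss;    /* mss requested by user in ioctl */
    unsigned short mss_clamp;   /* Maximal mss, negotiated at setup */
};

/* Mirrors the new tcp_clear_options() signature: it takes the small
 * struct, so callers no longer need a full socket. */
static void tcp_clear_options(struct tcp_options_received *rx_opt)
{
    rx_opt->tstamp_ok = rx_opt->sack_ok =
        rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

/* Hypothetical stand-in for tcp_parse_options(): pretend the incoming
 * SYN carried a timestamp option. */
static void parse_syn_options(struct tcp_options_received *opt_rx)
{
    opt_rx->saw_tstamp = 1;
    opt_rx->rcv_tsval = 12345;
}

int main(void)
{
    /* An on-stack options struct is now enough for SYN processing,
     * which is the shape tcp_v4_conn_request() takes after the patch. */
    struct tcp_options_received tmp_opt;

    memset(&tmp_opt, 0, sizeof(tmp_opt));
    tcp_clear_options(&tmp_opt);
    tmp_opt.mss_clamp = 536;            /* IPv4 default from the diff */

    parse_syn_options(&tmp_opt);
    tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

    printf("tstamp_ok=%d tsval=%u mss_clamp=%u\n",
           tmp_opt.tstamp_ok, tmp_opt.rcv_tsval,
           (unsigned)tmp_opt.mss_clamp);
    return 0;
}
```

The payoff is visible in tcp_v4_conn_request() and tcp_v6_conn_request() above: a struct tcp_sock on the stack was large and only a handful of its fields were valid, while tcp_options_received is a few dozen bytes and every field is meaningful.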
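The PAWS helpers change shape the same way: after this merge, tcp_paws_check() needs only the received-options state plus the clock, which is what lets tcp_timewait_state_process() and tcp_check_req() run it against an on-stack tmp_opt seeded from the timewait bucket or open request. A minimal sketch of the check's logic follows; now_sec stands in for xtime.tv_sec, and the constant values are assumptions for illustration (24 days and 60 seconds, matching the names TCP_PAWS_24DAYS and TCP_PAWS_MSL).

```c
#include <stdint.h>
#include <stdio.h>

#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24) /* assumed: 24 days in seconds */
#define TCP_PAWS_MSL    60                  /* assumed: one MSL in seconds */

/* Same shape as tcp_paws_check() after the refactor: it inspects only
 * received-options state.  Returns 1 when the segment should be
 * rejected as a PAWS violation, 0 when it is acceptable. */
static int paws_check(uint32_t rcv_tsval, uint32_t ts_recent,
                      long ts_recent_stamp, long now_sec, int rst)
{
    /* Serial-number arithmetic: timestamp did not go backwards. */
    if ((int32_t)(rcv_tsval - ts_recent) >= 0)
        return 0;
    /* Stored stamp is too old to be trusted for rejection. */
    if (now_sec >= ts_recent_stamp + TCP_PAWS_24DAYS)
        return 0;
    /* Time bounds are relaxed to MSL for RST segments. */
    if (rst && now_sec >= ts_recent_stamp + TCP_PAWS_MSL)
        return 0;
    return 1;
}

int main(void)
{
    /* 0xFFFFFF00 is "before" 0x10 in 32-bit serial arithmetic, so a
     * fresh segment echoing it is rejected... */
    printf("%d\n", paws_check(0xFFFFFF00u, 0x10u, 1000, 1001, 0));
    /* ...unless ts_recent has been sitting unused for over 24 days. */
    printf("%d\n", paws_check(0xFFFFFF00u, 0x10u, 1000,
                              1000 + TCP_PAWS_24DAYS, 0));
    return 0;
}
```

Passing a const struct tcp_options_received * into the real helper also makes the timewait path cheaper: tcp_check_req() fabricates an approximate ts_recent_stamp from the retransmission count, exactly as the comment in the tcp_minisocks.c hunk explains.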
