summary refs log tree commit diff
diff options
context:
space:
mode:
authorDavid S. Miller <davem@nuts.davemloft.net>2004-09-06 21:03:16 -0700
committerPatrick McHardy <kaber@trash.net>2004-09-06 21:03:16 -0700
commit5a6bdc92894c920dcc7fcf7010d0eb05de2e3d21 (patch)
tree0cae8445add8c3bee110ea3b61b1ec2377a44141
parent95d267365e82205bada1b50fe699fb2284aa090e (diff)
[TCP]: Make sure SKB tso factor is setup early enough.
It needs to be set so that congestion window calculations have a valid value to work with. This means that doing it at write queue running time is too late.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/net/tcp.h7
-rw-r--r--net/ipv4/tcp_output.c19
2 files changed, 17 insertions, 9 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1de15c7a560a..1a8a317f2bd5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1469,6 +1469,8 @@ tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int n
tcp_minshall_check(tp))));
}
+extern void tcp_set_skb_tso_factor(struct sk_buff *, unsigned int, unsigned int);
+
/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
* should be put on the wire right now.
*/
@@ -1477,6 +1479,11 @@ static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
{
int pkts = TCP_SKB_CB(skb)->tso_factor;
+ if (!pkts) {
+ tcp_set_skb_tso_factor(skb, cur_mss, tp->mss_cache_std);
+ pkts = TCP_SKB_CB(skb)->tso_factor;
+ }
+
/* RFC 1122 - section 4.2.3.4
*
* We must queue if
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 336c7121b6b6..32174549304e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -422,8 +422,8 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
}
}
-static void tcp_set_skb_tso_factor(struct sk_buff *skb, unsigned int mss,
- unsigned int mss_std)
+void tcp_set_skb_tso_factor(struct sk_buff *skb, unsigned int mss,
+ unsigned int mss_std)
{
if (skb->len <= mss_std) {
/* Avoid the costly divide in the normal
@@ -652,7 +652,7 @@ int tcp_sync_mss(struct sock *sk, u32 pmtu)
int tcp_write_xmit(struct sock *sk, int nonagle)
{
struct tcp_opt *tp = tcp_sk(sk);
- unsigned int mss_now, mss_std;
+ unsigned int mss_now;
/* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and all
@@ -668,7 +668,6 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
* IP options mid-stream. Silly to do, but cover it.
*/
mss_now = tcp_current_mss(sk, 1);
- mss_std = tp->mss_cache_std;
while ((skb = sk->sk_send_head) &&
tcp_snd_test(tp, skb, mss_now,
@@ -677,8 +676,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
if (skb->len > mss_now) {
if (tcp_fragment(sk, skb, mss_now))
break;
- } else
- tcp_set_skb_tso_factor(skb, mss_now, mss_std);
+ }
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)))
@@ -1059,6 +1057,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
__u8 sacked = TCP_SKB_CB(skb)->sacked;
int pkts = TCP_SKB_CB(skb)->tso_factor;
+ BUG_ON(!pkts);
+
if ((tcp_packets_in_flight(tp) + (pkts-1)) >=
tp->snd_cwnd)
return;
@@ -1109,6 +1109,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
sk_stream_for_retrans_queue(skb, sk) {
int pkts = TCP_SKB_CB(skb)->tso_factor;
+ BUG_ON(!pkts);
+
packet_cnt += pkts;
if (packet_cnt > tcp_get_pcount(&tp->fackets_out))
break;
@@ -1536,7 +1538,6 @@ int tcp_write_wakeup(struct sock *sk)
before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
int err;
unsigned int mss = tcp_current_mss(sk, 0);
- unsigned int mss_std = tp->mss_cache_std;
unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
@@ -1559,8 +1560,8 @@ int tcp_write_wakeup(struct sock *sk)
sk->sk_route_caps &= ~NETIF_F_TSO;
tp->mss_cache = tp->mss_cache_std;
}
- } else
- tcp_set_skb_tso_factor(skb, mss, mss_std);
+ } else if (!TCP_SKB_CB(skb)->tso_factor)
+ tcp_set_skb_tso_factor(skb, mss, tp->mss_cache_std);
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
TCP_SKB_CB(skb)->when = tcp_time_stamp;