summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorStephen Hemminger <shemminger@osdl.org>2004-09-26 22:00:50 -0700
committerDavid S. Miller <davem@kernel.bkbits.net>2004-09-26 22:00:50 -0700
commitc390466b578afe29a1a0191c014e64ad2116d545 (patch)
tree0373150a43b0025437ec45c5b6aaacac3931c28a
parentc8cbfea286b893875029cb15100c4fe9102701d8 (diff)
[TCP]: Westwood cleanup
Westwood code cleanup:
* use const
* avoid needless paren's and returns
* inline acked_count (called once)

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/ipv4/tcp_input.c41
1 files changed, 19 insertions, 22 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8a2968840be4..a7c1f4a5277f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2620,18 +2620,16 @@ static void westwood_filter(struct sock *sk, __u32 delta)
* WESTWOOD_RTT_MIN minimum bound since we could be on a LAN!
*/
-static inline __u32 westwood_update_rttmin(struct sock *sk)
+static inline __u32 westwood_update_rttmin(const struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ const struct tcp_opt *tp = tcp_sk(sk);
__u32 rttmin = tp->westwood.rtt_min;
- if (tp->westwood.rtt == 0)
- return(rttmin);
-
- if (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)
+ if (tp->westwood.rtt != 0 &&
+ (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin))
rttmin = tp->westwood.rtt;
- return(rttmin);
+ return rttmin;
}
/*
@@ -2639,11 +2637,11 @@ static inline __u32 westwood_update_rttmin(struct sock *sk)
* Evaluate increases for dk.
*/
-static inline __u32 westwood_acked(struct sock *sk)
+static inline __u32 westwood_acked(const struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ const struct tcp_opt *tp = tcp_sk(sk);
- return ((tp->snd_una) - (tp->westwood.snd_una));
+ return tp->snd_una - tp->westwood.snd_una;
}
/*
@@ -2655,9 +2653,9 @@ static inline __u32 westwood_acked(struct sock *sk)
* window, 1 if the sample has to be considered in the next window.
*/
-static int westwood_new_window(struct sock *sk)
+static int westwood_new_window(const struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ const struct tcp_opt *tp = tcp_sk(sk);
__u32 left_bound;
__u32 rtt;
int ret = 0;
@@ -2691,14 +2689,13 @@ static void __westwood_update_window(struct sock *sk, __u32 now)
struct tcp_opt *tp = tcp_sk(sk);
__u32 delta = now - tp->westwood.rtt_win_sx;
- if (!delta)
- return;
+ if (delta) {
+ if (tp->westwood.rtt)
+ westwood_filter(sk, delta);
- if (tp->westwood.rtt)
- westwood_filter(sk, delta);
-
- tp->westwood.bk = 0;
- tp->westwood.rtt_win_sx = tcp_time_stamp;
+ tp->westwood.bk = 0;
+ tp->westwood.rtt_win_sx = tcp_time_stamp;
+ }
}
@@ -2742,7 +2739,7 @@ static void westwood_dupack_update(struct sock *sk)
static inline int westwood_may_change_cumul(struct tcp_opt *tp)
{
- return ((tp->westwood.cumul_ack) > tp->mss_cache_std);
+ return (tp->westwood.cumul_ack > tp->mss_cache_std);
}
static inline void westwood_partial_update(struct tcp_opt *tp)
@@ -2763,7 +2760,7 @@ static inline void westwood_complete_update(struct tcp_opt *tp)
* delayed or partial acks.
*/
-static __u32 westwood_acked_count(struct sock *sk)
+static inline __u32 westwood_acked_count(struct sock *sk)
{
struct tcp_opt *tp = tcp_sk(sk);
@@ -2777,7 +2774,7 @@ static __u32 westwood_acked_count(struct sock *sk)
if (westwood_may_change_cumul(tp)) {
/* Partial or delayed ack */
- if ((tp->westwood.accounted) >= (tp->westwood.cumul_ack))
+ if (tp->westwood.accounted >= tp->westwood.cumul_ack)
westwood_partial_update(tp);
else
westwood_complete_update(tp);