Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--    net/ipv4/tcp_input.c    84
1 file changed, 42 insertions(+), 42 deletions(-)
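
Every hunk below makes the same mechanical substitution: calls to the old three-argument min()/max() macros, which took the comparison type as their first argument, are renamed to min_t()/max_t(). As a sketch of the semantics (the authoritative definitions live in include/linux/kernel.h in kernels of this era, later include/linux/minmax.h, and have varied slightly across versions):

        /* Sketch, not verbatim kernel source: both arguments are cast to the
         * caller-supplied type before comparing, so mixed signed/unsigned
         * operands are compared predictably.
         */
        #define min_t(type, x, y) \
                ({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })
        #define max_t(type, x, y) \
                ({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

The rename is purely textual; every call site keeps its type argument and operands unchanged.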
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 74563cde650a..bdaf248cf950 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -168,7 +168,7 @@ static void tcp_incr_quickack(struct tcp_opt *tp)
         if (quickacks==0)
                 quickacks=2;
         if (quickacks > tp->ack.quick)
-                tp->ack.quick = min(unsigned int, quickacks, TCP_MAX_QUICKACKS);
+                tp->ack.quick = min_t(unsigned int, quickacks, TCP_MAX_QUICKACKS);
 }
 
 void tcp_enter_quickack_mode(struct tcp_opt *tp)
@@ -198,7 +198,7 @@ static void tcp_fixup_sndbuf(struct sock *sk)
         int sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
 
         if (sk->sndbuf < 3*sndmem)
-                sk->sndbuf = min(int, 3*sndmem, sysctl_tcp_wmem[2]);
+                sk->sndbuf = min_t(int, 3*sndmem, sysctl_tcp_wmem[2]);
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -262,7 +262,7 @@ tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
                 incr = __tcp_grow_window(sk, tp, skb);
 
         if (incr) {
-                tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
+                tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh + incr, tp->window_clamp);
                 tp->ack.quick |= 1;
         }
 }
@@ -282,7 +282,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
         while (tcp_win_from_space(rcvmem) < tp->advmss)
                 rcvmem += 128;
         if (sk->rcvbuf < 4*rcvmem)
-                sk->rcvbuf = min(int, 4*rcvmem, sysctl_tcp_rmem[2]);
+                sk->rcvbuf = min_t(int, 4*rcvmem, sysctl_tcp_rmem[2]);
 }
 
 /* 4. Try to fixup all. It is made iimediately after connection enters
@@ -304,16 +304,16 @@ static void tcp_init_buffer_space(struct sock *sk)
                         tp->window_clamp = maxwin;
 
                 if (sysctl_tcp_app_win && maxwin>4*tp->advmss)
-                        tp->window_clamp = max(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
+                        tp->window_clamp = max_t(u32, maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
         }
 
         /* Force reservation of one segment. */
         if (sysctl_tcp_app_win &&
             tp->window_clamp > 2*tp->advmss &&
             tp->window_clamp + tp->advmss > maxwin)
-                tp->window_clamp = max(u32, 2*tp->advmss, maxwin-tp->advmss);
+                tp->window_clamp = max_t(u32, 2*tp->advmss, maxwin-tp->advmss);
 
-        tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, tp->window_clamp);
+        tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, tp->window_clamp);
         tp->snd_cwnd_stamp = tcp_time_stamp;
 }
@@ -338,7 +338,7 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
                     !(sk->userlocks&SOCK_RCVBUF_LOCK) &&
                     !tcp_memory_pressure &&
                     atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
-                        sk->rcvbuf = min(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
+                        sk->rcvbuf = min_t(int, atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
         }
         if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
                 app_win += ofo_win;
@@ -346,11 +346,11 @@
                         app_win >>= 1;
                 if (app_win > tp->ack.rcv_mss)
                         app_win -= tp->ack.rcv_mss;
-                app_win = max(unsigned int, app_win, 2*tp->advmss);
+                app_win = max_t(unsigned int, app_win, 2*tp->advmss);
 
                 if (!ofo_win)
-                        tp->window_clamp = min(u32, tp->window_clamp, app_win);
-                tp->rcv_ssthresh = min(u32, tp->window_clamp, 2*tp->advmss);
+                        tp->window_clamp = min_t(u32, tp->window_clamp, app_win);
+                tp->rcv_ssthresh = min_t(u32, tp->window_clamp, 2*tp->advmss);
         }
 }
@@ -472,7 +472,7 @@ static __inline__ void tcp_rtt_estimator(struct tcp_opt *tp, __u32 mrtt)
                 /* no previous measure. */
                 tp->srtt = m<<3;        /* take the measured time to be rtt */
                 tp->mdev = m<<2;        /* make sure rto = 3*rtt */
-                tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
+                tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
                 tp->rtt_seq = tp->snd_nxt;
         }
 }
@@ -575,7 +575,7 @@ void tcp_update_metrics(struct sock *sk)
                            tp->ca_state == TCP_CA_Open) {
                         /* Cong. avoidance phase, cwnd is reliable. */
                         if (!(dst->mxlock&(1<<RTAX_SSTHRESH)))
-                                dst->ssthresh = max(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
+                                dst->ssthresh = max_t(u32, tp->snd_cwnd>>1, tp->snd_ssthresh);
                         if (!(dst->mxlock&(1<<RTAX_CWND)))
                                 dst->cwnd = (dst->cwnd + tp->snd_cwnd)>>1;
                 } else {
@@ -617,7 +617,7 @@ __u32 tcp_init_cwnd(struct tcp_opt *tp)
         else if (cwnd > tp->snd_ssthresh)
                 cwnd = tp->snd_ssthresh;
 
-        return min(u32, cwnd, tp->snd_cwnd_clamp);
+        return min_t(u32, cwnd, tp->snd_cwnd_clamp);
 }
 
 /* Initialize metrics on socket. */
@@ -668,7 +668,7 @@ static void tcp_init_metrics(struct sock *sk)
                 tp->srtt = dst->rtt;
         if (dst->rttvar > tp->mdev) {
                 tp->mdev = dst->rttvar;
-                tp->mdev_max = tp->rttvar = max(u32, tp->mdev, TCP_RTO_MIN);
+                tp->mdev_max = tp->rttvar = max_t(u32, tp->mdev, TCP_RTO_MIN);
         }
         tcp_set_rto(tp);
         tcp_bound_rto(tp);
@@ -693,7 +693,7 @@ reset:
 static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts)
 {
         if (metric > tp->reordering) {
-                tp->reordering = min(unsigned int, TCP_MAX_REORDERING, metric);
+                tp->reordering = min_t(unsigned int, TCP_MAX_REORDERING, metric);
 
                 /* This exciting event is worth to be remembered. 8) */
                 if (ts)
@@ -848,12 +848,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                 if (sacked&TCPCB_RETRANS) {
                                         if ((dup_sack && in_sack) &&
                                             (sacked&TCPCB_SACKED_ACKED))
-                                                reord = min(int, fack_count, reord);
+                                                reord = min_t(int, fack_count, reord);
                                 } else {
                                         /* If it was in a hole, we detected reordering. */
                                         if (fack_count < prior_fackets &&
                                             !(sacked&TCPCB_SACKED_ACKED))
-                                                reord = min(int, fack_count, reord);
+                                                reord = min_t(int, fack_count, reord);
                                 }
 
                                 /* Nothing to do; acked frame is about to be dropped. */
@@ -885,7 +885,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                  */
                                 if (!(sacked & TCPCB_RETRANS) &&
                                     fack_count < prior_fackets)
-                                        reord = min(int, fack_count, reord);
+                                        reord = min_t(int, fack_count, reord);
 
                                 if (sacked & TCPCB_LOST) {
                                         TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
@@ -901,7 +901,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                         tp->fackets_out = fack_count;
                         } else {
                                 if (dup_sack && (sacked&TCPCB_RETRANS))
-                                        reord = min(int, fack_count, reord);
+                                        reord = min_t(int, fack_count, reord);
                         }
 
                         /* D-SACK. We can detect redundant retransmission
@@ -1019,7 +1019,7 @@ void tcp_enter_loss(struct sock *sk, int how)
         }
         tcp_sync_left_out(tp);
 
-        tp->reordering = min(unsigned int, tp->reordering, sysctl_tcp_reordering);
+        tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering);
         tp->ca_state = TCP_CA_Loss;
         tp->high_seq = tp->snd_nxt;
         TCP_ECN_queue_cwr(tp);
@@ -1177,7 +1177,7 @@ tcp_time_to_recover(struct sock *sk, struct tcp_opt *tp)
          * recovery more?
          */
         if (tp->packets_out <= tp->reordering &&
-            tp->sacked_out >= max(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
+            tp->sacked_out >= max_t(u32, tp->packets_out/2, sysctl_tcp_reordering) &&
             !tcp_may_send_now(sk, tp)) {
                 /* We have nothing to send. This connection is limited
                  * either by receiver window or by application.
@@ -1194,8 +1194,8 @@ tcp_time_to_recover(struct sock *sk, struct tcp_opt *tp)
  */
 static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend)
 {
-        u32 holes = min(unsigned int,
-                        max(unsigned int, tp->lost_out, 1),
+        u32 holes = min_t(unsigned int,
+                        max_t(unsigned int, tp->lost_out, 1),
                         tp->packets_out);
 
         if (tp->sacked_out + holes > tp->packets_out) {
@@ -1291,7 +1291,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_opt *tp)
  */
 static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp)
 {
-        tp->snd_cwnd = min(u32, tp->snd_cwnd,
+        tp->snd_cwnd = min_t(u32, tp->snd_cwnd,
                            tcp_packets_in_flight(tp)+tcp_max_burst(tp));
         tp->snd_cwnd_stamp = tcp_time_stamp;
 }
@@ -1308,7 +1308,7 @@ static void tcp_cwnd_down(struct tcp_opt *tp)
         if (decr && tp->snd_cwnd > tp->snd_ssthresh/2)
                 tp->snd_cwnd -= decr;
 
-        tp->snd_cwnd = min(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+        tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
         tp->snd_cwnd_stamp = tcp_time_stamp;
 }
@@ -1340,7 +1340,7 @@ static void DBGUNDO(struct sock *sk, struct tcp_opt *tp, const char *msg)
 static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
 {
         if (tp->prior_ssthresh) {
-                tp->snd_cwnd = max(unsigned int,
+                tp->snd_cwnd = max_t(unsigned int,
                                    tp->snd_cwnd, tp->snd_ssthresh<<1);
 
                 if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
@@ -1348,7 +1348,7 @@ static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
                         TCP_ECN_withdraw_cwr(tp);
                 }
         } else {
-                tp->snd_cwnd = max(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
+                tp->snd_cwnd = max_t(unsigned int, tp->snd_cwnd, tp->snd_ssthresh);
         }
         tcp_moderate_cwnd(tp);
         tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1450,7 +1450,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp)
 
 static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
 {
-        tp->snd_cwnd = min(u32, tp->snd_cwnd, tp->snd_ssthresh);
+        tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_ssthresh);
         tp->snd_cwnd_stamp = tcp_time_stamp;
 }
@@ -1836,7 +1836,7 @@ static void tcp_ack_probe(struct sock *sk)
                  */
         } else {
                 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
-                                     min(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
+                                     min_t(u32, tp->rto << tp->backoff, TCP_RTO_MAX));
         }
 }
@@ -2323,7 +2323,7 @@ static __inline__ void tcp_dsack_set(struct tcp_opt *tp, u32 seq, u32 end_seq)
                 tp->dsack = 1;
                 tp->duplicate_sack[0].start_seq = seq;
                 tp->duplicate_sack[0].end_seq = end_seq;
-                tp->eff_sacks = min(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
+                tp->eff_sacks = min_t(unsigned int, tp->num_sacks+1, 4-tp->tstamp_ok);
         }
 }
@@ -2376,7 +2376,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_opt *tp)
                          * Decrease num_sacks.
                          */
                         tp->num_sacks--;
-                        tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+                        tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
                         for(i=this_sack; i < tp->num_sacks; i++)
                                 sp[i] = sp[i+1];
                         continue;
@@ -2438,7 +2438,7 @@ new_sack:
         sp->start_seq = seq;
         sp->end_seq = end_seq;
         tp->num_sacks++;
-        tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+        tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
 }
 
 /* RCV.NXT advances, some SACKs should be eaten.
  */
@@ -2475,7 +2475,7 @@ static void tcp_sack_remove(struct tcp_opt *tp)
         }
         if (num_sacks != tp->num_sacks) {
                 tp->num_sacks = num_sacks;
-                tp->eff_sacks = min(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
+                tp->eff_sacks = min_t(unsigned int, tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
         }
 }
@@ -2541,7 +2541,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
         if (tp->dsack) {
                 tp->dsack = 0;
-                tp->eff_sacks = min(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
+                tp->eff_sacks = min_t(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
         }
 
         /* Queue data for delivery to the user.
@@ -2558,7 +2558,7 @@
                     tp->ucopy.len &&
                     sk->lock.users &&
                     !tp->urg_data) {
-                        int chunk = min(unsigned int, skb->len, tp->ucopy.len);
+                        int chunk = min_t(unsigned int, skb->len, tp->ucopy.len);
 
                         __set_current_state(TASK_RUNNING);
@@ -2807,7 +2807,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
                         if (offset < 0) BUG();
                         if (size > 0) {
-                                size = min(int, copy, size);
+                                size = min_t(int, copy, size);
                                 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
                                         BUG();
                                 TCP_SKB_CB(nskb)->end_seq += size;
@@ -2886,7 +2886,7 @@ static int tcp_prune_queue(struct sock *sk)
         if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
                 tcp_clamp_window(sk, tp);
         else if (tcp_memory_pressure)
-                tp->rcv_ssthresh = min(u32, tp->rcv_ssthresh, 4*tp->advmss);
+                tp->rcv_ssthresh = min_t(u32, tp->rcv_ssthresh, 4*tp->advmss);
 
         tcp_collapse_ofo_queue(sk);
         tcp_collapse(sk, sk->receive_queue.next,
@@ -2941,7 +2941,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
         if (tp->ca_state == TCP_CA_Open &&
             sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
                 /* Limited by application or receiver window. */
-                u32 win_used = max(u32, tp->snd_cwnd_used, 2);
+                u32 win_used = max_t(u32, tp->snd_cwnd_used, 2);
                 if (win_used < tp->snd_cwnd) {
                         tp->snd_ssthresh = tcp_current_ssthresh(tp);
                         tp->snd_cwnd = (tp->snd_cwnd+win_used)>>1;
@@ -2967,10 +2967,10 @@ static void tcp_new_space(struct sock *sk)
                 int sndmem, demanded;
 
                 sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
-                demanded = max(unsigned int, tp->snd_cwnd, tp->reordering+1);
+                demanded = max_t(unsigned int, tp->snd_cwnd, tp->reordering+1);
                 sndmem *= 2*demanded;
                 if (sndmem > sk->sndbuf)
-                        sk->sndbuf = min(int, sndmem, sysctl_tcp_wmem[2]);
+                        sk->sndbuf = min_t(int, sndmem, sysctl_tcp_wmem[2]);
                 tp->snd_cwnd_stamp = tcp_time_stamp;
         }
@@ -3520,7 +3520,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 if (tp->wscale_ok == 0) {
                         tp->snd_wscale = tp->rcv_wscale = 0;
-                        tp->window_clamp = min(u32, tp->window_clamp, 65535);
+                        tp->window_clamp = min_t(u32, tp->window_clamp, 65535);
                 }
 
                 if (tp->saw_tstamp) {
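
Why these macros take an explicit type at all (an illustrative, self-contained example; none of this code appears in the patch): under C's usual arithmetic conversions, a mixed signed/unsigned comparison converts the signed operand to unsigned, so a negative value silently becomes huge. min_t()/max_t() force both operands to one caller-chosen type before comparing.

        #include <stdio.h>

        int main(void)
        {
                int a = -1;
                unsigned int b = 100;

                /* Naive comparison: a is converted to unsigned, so -1
                 * compares greater than 100 and the "minimum" is 100.
                 */
                printf("%u\n", a < b ? (unsigned int)a : b);    /* prints 100 */

                /* Forcing a signed comparison, as min_t(int, a, b) would: */
                printf("%d\n", a < (int)b ? a : (int)b);        /* prints -1 */
                return 0;
        }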
