author    Hideaki Yoshifuji <yoshfuji@linux-ipv6.org>    2004-07-09 01:18:00 +0900
committer Hideaki Yoshifuji <yoshfuji@linux-ipv6.org>    2004-07-09 01:18:00 +0900
commit    01b72229b665ec703247f15202b19e3693e4935d (patch)
tree      25f7ebffc09009551b58ce33a791216d832c9844 /net/ipv4/tcp_timer.c
parent    07f0a148b27e12aa11086153ca9425d311e20522 (diff)
[NET] convert storage for MIB from struct member to array item.
Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--  net/ipv4/tcp_timer.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
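For context, the conversion this patch applies is to stop naming each MIB counter as a struct member and instead index a single counter array with LINUX_MIB_* enum values, so NET_INC_STATS_BH() and NET_ADD_STATS_BH() take an enum index rather than a member name. Below is a minimal standalone C sketch of that idea; the enum ordering, struct layout, global variable, and macro bodies are illustrative assumptions for this sketch, not the kernel's actual (per-CPU, BH-safe) definitions.

/*
 * Illustrative sketch only, not the kernel's headers: counters move
 * from one named struct member each to one array slot each, indexed
 * by a LINUX_MIB_* enum value.
 */
#include <stdio.h>

enum linux_mib_field {                 /* ordering is an assumption */
	LINUX_MIB_TCPABORTONTIMEOUT,
	LINUX_MIB_TCPABORTONMEMORY,
	LINUX_MIB_DELAYEDACKLOCKED,
	LINUX_MIB_DELAYEDACKS,
	LINUX_MIB_TCPTIMEOUTS,
	__LINUX_MIB_MAX
};

/* Old style: one member per counter (what the '-' lines referenced). */
struct linux_mib_old {
	unsigned long TCPAbortOnTimeout;
	unsigned long TCPAbortOnMemory;
	/* ... one field per counter ... */
};

/* New style: a single array, one slot per enum value. */
struct linux_mib {
	unsigned long mibs[__LINUX_MIB_MAX];
};

static struct linux_mib net_statistics;   /* hypothetical stand-in */

/* Hedged stand-ins for the increment/add macros: they take an index. */
#define NET_INC_STATS_BH(field)      (net_statistics.mibs[(field)]++)
#define NET_ADD_STATS_BH(field, n)   (net_statistics.mibs[(field)] += (n))

int main(void)
{
	NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
	NET_ADD_STATS_BH(LINUX_MIB_DELAYEDACKS, 3);
	printf("timeouts=%lu delayed_acks=%lu\n",
	       net_statistics.mibs[LINUX_MIB_TCPTIMEOUTS],
	       net_statistics.mibs[LINUX_MIB_DELAYEDACKS]);
	return 0;
}

With counters addressed by index, the hunks below are mechanical substitutions: each struct-member name passed to NET_INC_STATS_BH()/NET_ADD_STATS_BH() is replaced by the corresponding LINUX_MIB_* identifier.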
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 1b8fe67e65f4..72a5a50b50ab 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -83,7 +83,7 @@ static void tcp_write_err(struct sock *sk)
sk->sk_error_report(sk);
tcp_done(sk);
- NET_INC_STATS_BH(TCPAbortOnTimeout);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
}
/* Do not allow orphaned sockets to eat all our resources.
@@ -126,7 +126,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
- NET_INC_STATS_BH(TCPAbortOnMemory);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
return 1;
}
return 0;
@@ -212,7 +212,7 @@ static void tcp_delack_timer(unsigned long data)
if (sock_owned_by_user(sk)) {
/* Try again later. */
tp->ack.blocked = 1;
- NET_INC_STATS_BH(DelayedACKLocked);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
goto out_unlock;
}
@@ -231,8 +231,8 @@ static void tcp_delack_timer(unsigned long data)
if (skb_queue_len(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
- NET_ADD_STATS_BH(TCPSchedulerFailed,
- skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
+ skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk->sk_backlog_rcv(sk, skb);
@@ -252,7 +252,7 @@ static void tcp_delack_timer(unsigned long data)
tp->ack.ato = TCP_ATO_MIN;
}
tcp_send_ack(sk);
- NET_INC_STATS_BH(DelayedACKs);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
}
TCP_CHECK_TIMER(sk);
@@ -353,19 +353,19 @@ static void tcp_retransmit_timer(struct sock *sk)
if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
if (tp->sack_ok) {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(TCPSackRecoveryFail);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
else
- NET_INC_STATS_BH(TCPSackFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
} else {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(TCPRenoRecoveryFail);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
else
- NET_INC_STATS_BH(TCPRenoFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
}
} else if (tp->ca_state == TCP_CA_Loss) {
- NET_INC_STATS_BH(TCPLossFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
} else {
- NET_INC_STATS_BH(TCPTimeouts);
+ NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
}
}