author	Robert Olsson <robert.olsson@data.slu.se>	2002-05-08 12:43:56 -0700
committer	David S. Miller <davem@nuts.ninka.net>	2002-05-08 12:43:56 -0700
commit	6445be80b80fd1f2aff91503c3b31ad57c838950 (patch)
tree	9ef9a9a2de45e2f1455f19236cb42d03b7d2bac6
parent	e53dd853f384c791da43fea73f32b17f011e479b (diff)
IPV4: Add statistics for route cache GC monitoring.
-rw-r--r--	include/net/route.h	4
-rw-r--r--	net/ipv4/route.c	20
2 files changed, 21 insertions, 3 deletions
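
After this patch each per-CPU row written by rt_cache_stat_get_info() carries 15 zero-padded hex counters, with the four new GC counters appended at the end. As a rough illustration only (not part of the patch), the sketch below reads those rows from userspace and prints the new columns; the procfs path "/proc/net/rt_cache_stat" is an assumption and may differ between kernel versions.

/*
 * Minimal userspace sketch, not from this patch: dump the GC counters that
 * this change appends to the per-CPU route cache statistics.  Assumes one
 * row of 15 hex words per CPU; the procfs path is an assumption.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/net/rt_cache_stat", "r");	/* assumed path */
	char line[512];
	int cpu = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int v[15];
		int n = 0;
		char *p = line, *end;

		/* Each row is one CPU: 15 zero-padded hex counters. */
		while (n < 15) {
			unsigned long x = strtoul(p, &end, 16);
			if (end == p)
				break;
			v[n++] = (unsigned int)x;
			p = end;
		}
		if (n < 15)
			continue;	/* skip any header or short line */

		/* The last four columns are the counters added by this patch. */
		printf("cpu%d: gc_total=%u gc_ignored=%u gc_goal_miss=%u gc_dst_overflow=%u\n",
		       cpu++, v[11], v[12], v[13], v[14]);
	}
	fclose(f);
	return 0;
}
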
diff --git a/include/net/route.h b/include/net/route.h
index d6812d468252..7ddc79e4d07e 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -110,6 +110,10 @@ struct rt_cache_stat
unsigned int out_hit;
unsigned int out_slow_tot;
unsigned int out_slow_mc;
+ unsigned int gc_total;
+ unsigned int gc_ignored;
+ unsigned int gc_goal_miss;
+ unsigned int gc_dst_overflow;
} ____cacheline_aligned_in_smp;
extern struct ip_rt_acct *ip_rt_acct;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 098312a7bf6a..efe2665b9420 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -286,7 +286,7 @@ static int rt_cache_stat_get_info(char *buffer, char **start, off_t offset, int
for (lcpu = 0; lcpu < smp_num_cpus; lcpu++) {
i = cpu_logical_map(lcpu);
- len += sprintf(buffer+len, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ len += sprintf(buffer+len, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
dst_entries,
rt_cache_stat[i].in_hit,
rt_cache_stat[i].in_slow_tot,
@@ -298,7 +298,13 @@ static int rt_cache_stat_get_info(char *buffer, char **start, off_t offset, int
rt_cache_stat[i].out_hit,
rt_cache_stat[i].out_slow_tot,
- rt_cache_stat[i].out_slow_mc
+ rt_cache_stat[i].out_slow_mc,
+
+ rt_cache_stat[i].gc_total,
+ rt_cache_stat[i].gc_ignored,
+ rt_cache_stat[i].gc_goal_miss,
+ rt_cache_stat[i].gc_dst_overflow
+
);
}
len -= offset;
@@ -499,9 +505,14 @@ static int rt_garbage_collect(void)
* Garbage collection is pretty expensive,
* do not make it too frequently.
*/
+
+ rt_cache_stat[smp_processor_id()].gc_total++;
+
if (now - last_gc < ip_rt_gc_min_interval &&
- atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
+ atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
+ rt_cache_stat[smp_processor_id()].gc_ignored++;
goto out;
+ }
/* Calculate number of entries, which we want to expire now. */
goal = atomic_read(&ipv4_dst_ops.entries) -
@@ -567,6 +578,8 @@ static int rt_garbage_collect(void)
We will not spin here for long time in any case.
*/
+ rt_cache_stat[smp_processor_id()].gc_goal_miss++;
+
if (expire == 0)
break;
@@ -584,6 +597,7 @@ static int rt_garbage_collect(void)
goto out;
if (net_ratelimit())
printk(KERN_WARNING "dst cache overflow\n");
+ rt_cache_stat[smp_processor_id()].gc_dst_overflow++;
return 1;
work_done: