| author | Thomas Graf <tgraf@suug.ch> | 2004-10-19 03:51:24 -0700 |
|---|---|---|
| committer | David S. Miller <davem@nuts.davemloft.net> | 2004-10-19 03:51:24 -0700 |
| commit | e3ba9650890e39f1f60dbfa13c7ff2ce7ba1ffa5 (patch) | |
| tree | 51eca78b01b2a0f0726a49e1ac0b74a93fb6a453 /net | |
| parent | a63563e2f61fde0df66c39d45e5cb71781dd5aba (diff) | |
[PKT_SCHED]: Replace tc_stats with new gnet_stats in struct Qdisc
Replaces tc_stats in struct Qdisc with the new gnet_stats structures
(bstats for byte/packet counters, qstats for queue statistics) and
adapts all qdiscs to use them.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
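For context, a minimal sketch of the split this patch relies on, reconstructed from the field accesses in the diff below rather than copied from the kernel headers (the authoritative definitions live in include/linux/gen_stats.h of this kernel era; fields the diff does not touch are omitted):

```c
/* Sketch of the gnet_stats split assumed by this patch; reconstructed
 * from the accesses below, not copied from the kernel headers. */
struct gnet_stats_basic {
	__u64	bytes;		/* sch->bstats.bytes   (was sch->stats.bytes)   */
	__u32	packets;	/* sch->bstats.packets (was sch->stats.packets) */
};

struct gnet_stats_queue {
	__u32	backlog;	/* was sch->stats.backlog    */
	__u32	drops;		/* was sch->stats.drops      */
	__u32	overlimits;	/* was sch->stats.overlimits */
	/* ... remaining queue fields omitted ... */
};

/* In struct Qdisc, the single "struct tc_stats stats" member becomes: */
struct gnet_stats_basic	bstats;	/* throughput counters  */
struct gnet_stats_queue	qstats;	/* queue-state counters */
```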
Diffstat (limited to 'net')
| -rw-r--r-- | net/sched/sch_cbq.c | 22 |
| -rw-r--r-- | net/sched/sch_dsmark.c | 8 |
| -rw-r--r-- | net/sched/sch_fifo.c | 24 |
| -rw-r--r-- | net/sched/sch_generic.c | 6 |
| -rw-r--r-- | net/sched/sch_gred.c | 26 |
| -rw-r--r-- | net/sched/sch_hfsc.c | 14 |
| -rw-r--r-- | net/sched/sch_htb.c | 14 |
| -rw-r--r-- | net/sched/sch_ingress.c | 16 |
| -rw-r--r-- | net/sched/sch_netem.c | 14 |
| -rw-r--r-- | net/sched/sch_prio.c | 10 |
| -rw-r--r-- | net/sched/sch_red.c | 30 |
| -rw-r--r-- | net/sched/sch_sfq.c | 10 |
| -rw-r--r-- | net/sched/sch_tbf.c | 14 |
| -rw-r--r-- | net/sched/sch_teql.c | 6 |
14 files changed, 107 insertions, 107 deletions
```diff
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 2ae73826d6cf..8642e294473f 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -433,8 +433,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
 	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->stats.packets++;
-		sch->stats.bytes+=len;
+		sch->bstats.packets++;
+		sch->bstats.bytes+=len;
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -443,7 +443,7 @@
 	}
 
 #ifndef CONFIG_NET_CLS_ACT
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	if (cl == NULL)
 		kfree_skb(skb);
 	else {
@@ -452,7 +452,7 @@
 	}
 #else
 	if ( NET_XMIT_DROP == ret) {
-		sch->stats.drops++;
+		sch->qstats.drops++;
 	}
 
 	if (cl != NULL) {
@@ -472,7 +472,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if ((cl = q->tx_class) == NULL) {
 		kfree_skb(skb);
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return NET_XMIT_CN;
 	}
 	q->tx_class = NULL;
@@ -489,7 +489,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return 0;
 	}
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	cl->stats.drops++;
 	return ret;
 }
@@ -729,17 +729,17 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 
 		if (cl->q->enqueue(skb, cl->q) == 0) {
 			sch->q.qlen++;
-			sch->stats.packets++;
-			sch->stats.bytes+=len;
+			sch->bstats.packets++;
+			sch->bstats.bytes+=len;
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
 		}
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return 0;
 	}
 
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	return -1;
 }
 #endif
@@ -1090,7 +1090,7 @@ cbq_dequeue(struct Qdisc *sch)
 	   Sigh... start watchdog timer in the last case. */
 
 	if (sch->q.qlen) {
-		sch->stats.overlimits++;
+		sch->qstats.overlimits++;
 		if (q->wd_expires) {
 			long delay = PSCHED_US2JIFFIE(q->wd_expires);
 			if (delay <= 0)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 28b61f0f87a7..c62ba0118786 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -241,11 +241,11 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 #endif
 	    ((ret = p->q->enqueue(skb,p->q)) != 0)) {
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return ret;
 	}
-	sch->stats.bytes += skb->len;
-	sch->stats.packets++;
+	sch->bstats.bytes += skb->len;
+	sch->bstats.packets++;
 	sch->q.qlen++;
 	return ret;
 }
@@ -299,7 +299,7 @@ static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
 		sch->q.qlen++;
 		return 0;
 	}
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	return ret;
 }
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index b089924177f8..9250198fe27f 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -47,14 +47,14 @@ bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
-	if (sch->stats.backlog + skb->len <= q->limit) {
+	if (sch->qstats.backlog + skb->len <= q->limit) {
 		__skb_queue_tail(&sch->q, skb);
-		sch->stats.backlog += skb->len;
-		sch->stats.bytes += skb->len;
-		sch->stats.packets++;
+		sch->qstats.backlog += skb->len;
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
 		return 0;
 	}
-	sch->stats.drops++;
+	sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_POLICE
 	if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
 #endif
@@ -66,7 +66,7 @@ static int
 bfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	__skb_queue_head(&sch->q, skb);
-	sch->stats.backlog += skb->len;
+	sch->qstats.backlog += skb->len;
 	return 0;
 }
 
@@ -77,7 +77,7 @@ bfifo_dequeue(struct Qdisc* sch)
 
 	skb = __skb_dequeue(&sch->q);
 	if (skb)
-		sch->stats.backlog -= skb->len;
+		sch->qstats.backlog -= skb->len;
 	return skb;
 }
 
@@ -89,7 +89,7 @@ fifo_drop(struct Qdisc* sch)
 	skb = __skb_dequeue_tail(&sch->q);
 	if (skb) {
 		unsigned int len = skb->len;
-		sch->stats.backlog -= len;
+		sch->qstats.backlog -= len;
 		kfree_skb(skb);
 		return len;
 	}
@@ -100,7 +100,7 @@ static void
 fifo_reset(struct Qdisc* sch)
 {
 	skb_queue_purge(&sch->q);
-	sch->stats.backlog = 0;
+	sch->qstats.backlog = 0;
 }
 
 static int
@@ -110,11 +110,11 @@ pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	if (sch->q.qlen < q->limit) {
 		__skb_queue_tail(&sch->q, skb);
-		sch->stats.bytes += skb->len;
-		sch->stats.packets++;
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
 		return 0;
 	}
-	sch->stats.drops++;
+	sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_POLICE
 	if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
 #endif
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2516dd92a0f1..379ba1d9cadc 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -318,11 +318,11 @@ pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 	if (list->qlen < qdisc->dev->tx_queue_len) {
 		__skb_queue_tail(list, skb);
 		qdisc->q.qlen++;
-		qdisc->stats.bytes += skb->len;
-		qdisc->stats.packets++;
+		qdisc->bstats.bytes += skb->len;
+		qdisc->bstats.packets++;
 		return 0;
 	}
-	qdisc->stats.drops++;
+	qdisc->qstats.drops++;
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 375c6c2fb996..5d3f1ee07629 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -130,7 +130,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
 	    "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
-	    sch->stats.backlog);
+	    sch->qstats.backlog);
 	/* sum up all the qaves of prios <= to ours to get the new qave*/
 	if (!t->eqp && t->grio) {
 		for (i=0;i<t->DPs;i++) {
@@ -161,7 +161,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
 	} else {
 		if (t->eqp) {
-			q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
+			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
 		} else {
 			q->qave += q->backlog - (q->qave >> q->Wlog);
 		}
@@ -179,9 +179,9 @@ enqueue:
 			q->backlog += skb->len;
 do_enqueue:
 			__skb_queue_tail(&sch->q, skb);
-			sch->stats.backlog += skb->len;
-			sch->stats.bytes += skb->len;
-			sch->stats.packets++;
+			sch->qstats.backlog += skb->len;
+			sch->bstats.bytes += skb->len;
+			sch->bstats.packets++;
 			return 0;
 		} else {
 			q->pdrop++;
@@ -189,12 +189,12 @@ do_enqueue:
 
 drop:
 	kfree_skb(skb);
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	return NET_XMIT_DROP;
 	}
 	if ((q->qave+qave) >= q->qth_max) {
 		q->qcount = -1;
-		sch->stats.overlimits++;
+		sch->qstats.overlimits++;
 		q->forced++;
 		goto drop;
 	}
@@ -203,7 +203,7 @@ drop:
 			goto enqueue;
 		q->qcount = 0;
 		q->qR = net_random()&q->Rmask;
-		sch->stats.overlimits++;
+		sch->qstats.overlimits++;
 		q->early++;
 		goto drop;
 	}
@@ -221,7 +221,7 @@ gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	PSCHED_SET_PASTPERFECT(q->qidlestart);
 
 	__skb_queue_head(&sch->q, skb);
-	sch->stats.backlog += skb->len;
+	sch->qstats.backlog += skb->len;
 	q->backlog += skb->len;
 	return 0;
 }
@@ -235,7 +235,7 @@ gred_dequeue(struct Qdisc* sch)
 
 	skb = __skb_dequeue(&sch->q);
 	if (skb) {
-		sch->stats.backlog -= skb->len;
+		sch->qstats.backlog -= skb->len;
 		q= t->tab[(skb->tc_index&0xf)];
 		if (q) {
 			q->backlog -= skb->len;
@@ -269,8 +269,8 @@ static unsigned int gred_drop(struct Qdisc* sch)
 	skb = __skb_dequeue_tail(&sch->q);
 	if (skb) {
 		unsigned int len = skb->len;
-		sch->stats.backlog -= len;
-		sch->stats.drops++;
+		sch->qstats.backlog -= len;
+		sch->qstats.drops++;
 		q= t->tab[(skb->tc_index&0xf)];
 		if (q) {
 			q->backlog -= len;
@@ -304,7 +304,7 @@ static void gred_reset(struct Qdisc* sch)
 
 	__skb_queue_purge(&sch->q);
-	sch->stats.backlog = 0;
+	sch->qstats.backlog = 0;
 	for (i=0;i<t->DPs;i++) {
 		q= t->tab[i];
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index fa1a9e5494c8..de172dd2cb0e 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1677,14 +1677,14 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	if (cl == NULL) {
 		if (NET_XMIT_DROP == ret) {
-			sch->stats.drops++;
+			sch->qstats.drops++;
 		}
 		return ret;
 	}
 #else
 	if (cl == NULL) {
 		kfree_skb(skb);
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return NET_XMIT_DROP;
 	}
 #endif
@@ -1692,7 +1692,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	err = cl->qdisc->enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		cl->stats.drops++;
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return err;
 	}
@@ -1701,8 +1701,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl->stats.packets++;
 	cl->stats.bytes += len;
-	sch->stats.packets++;
-	sch->stats.bytes += len;
+	sch->bstats.packets++;
+	sch->bstats.bytes += len;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1739,7 +1739,7 @@ hfsc_dequeue(struct Qdisc *sch)
 	 */
 	cl = vttree_get_minvt(&q->root, cur_time);
 	if (cl == NULL) {
-		sch->stats.overlimits++;
+		sch->qstats.overlimits++;
 		hfsc_schedule_watchdog(sch, cur_time);
 		return NULL;
 	}
@@ -1804,7 +1804,7 @@ hfsc_drop(struct Qdisc *sch)
 			list_move_tail(&cl->dlist, &q->droplist);
 		}
 		cl->stats.drops++;
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		sch->q.qlen--;
 		return len;
 	}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 61c8fa4db608..9102da1d50df 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -735,7 +735,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	} else if (!cl) {
 		if (NET_XMIT_DROP == ret) {
-			sch->stats.drops++;
+			sch->qstats.drops++;
 		}
 		return ret;
 	}
@@ -747,13 +747,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			q->direct_pkts++;
 		} else {
 			kfree_skb (skb);
-			sch->stats.drops++;
+			sch->qstats.drops++;
 			return NET_XMIT_DROP;
 		}
 	}
 #endif
 	else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		cl->stats.drops++;
 		return NET_XMIT_DROP;
 	} else {
@@ -762,7 +762,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	sch->q.qlen++;
-	sch->stats.packets++; sch->stats.bytes += skb->len;
+	sch->bstats.packets++; sch->bstats.bytes += skb->len;
 	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
 	return NET_XMIT_SUCCESS;
 }
@@ -783,11 +783,11 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			__skb_queue_head(&q->direct_queue, skb);
 			tskb = __skb_dequeue_tail(&q->direct_queue);
 			kfree_skb (tskb);
-			sch->stats.drops++;
+			sch->qstats.drops++;
 			return NET_XMIT_CN;
 		}
 	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		cl->stats.drops++;
 		return NET_XMIT_DROP;
 	} else
@@ -1117,7 +1117,7 @@ static void htb_delay_by(struct Qdisc *sch,long delay)
 	/* why don't use jiffies here ? because expires can be in past */
 	mod_timer(&q->timer, q->jiffies + delay);
 	sch->flags |= TCQ_F_THROTTLED;
-	sch->stats.overlimits++;
+	sch->qstats.overlimits++;
 	HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 13b5c3414794..7ea1d35e1cce 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -151,12 +151,12 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 	 * firewall FW_* code.
 	 */
 #ifdef CONFIG_NET_CLS_ACT
-	sch->stats.packets++;
-	sch->stats.bytes += skb->len;
+	sch->bstats.packets++;
+	sch->bstats.bytes += skb->len;
 	switch (result) {
 		case TC_ACT_SHOT:
 			result = TC_ACT_SHOT;
-			sch->stats.drops++;
+			sch->qstats.drops++;
 			break;
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
@@ -176,14 +176,14 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 	switch (result) {
 		case TC_POLICE_SHOT:
 			result = NF_DROP;
-			sch->stats.drops++;
+			sch->qstats.drops++;
 			break;
 		case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
 		case TC_POLICE_OK:
 		case TC_POLICE_UNSPEC:
 		default:
-			sch->stats.packets++;
-			sch->stats.bytes += skb->len;
+			sch->bstats.packets++;
+			sch->bstats.bytes += skb->len;
 			result = NF_ACCEPT;
 			break;
 	};
@@ -191,8 +191,8 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 #else
 	D2PRINTK("Overriding result to ACCEPT\n");
 	result = NF_ACCEPT;
-	sch->stats.packets++;
-	sch->stats.bytes += skb->len;
+	sch->bstats.packets++;
+	sch->bstats.bytes += skb->len;
 #endif
 #endif
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d29df1cabf59..733e22f674c8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -153,12 +153,12 @@ static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
 	if (likely(q->delayed.qlen < q->limit)) {
 		__skb_queue_tail(&q->delayed, skb);
 		sch->q.qlen++;
-		sch->stats.bytes += skb->len;
-		sch->stats.packets++;
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
 		return NET_XMIT_SUCCESS;
 	}
 
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
@@ -172,7 +172,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* Random packet drop 0 => none, ~0 => all */
 	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
 		pr_debug("netem_enqueue: random loss\n");
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return 0;	/* lie about loss so TCP doesn't know */
 	}
@@ -196,7 +196,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		++q->counter;
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 		if (ret)
-			sch->stats.drops++;
+			sch->qstats.drops++;
 		return ret;
 	}
@@ -224,7 +224,7 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
 		sch->q.qlen--;
-		sch->stats.drops++;
+		sch->qstats.drops++;
 	}
 	return len;
 }
@@ -256,7 +256,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 			__skb_unlink(skb, &q->delayed);
 
 			if (q->qdisc->enqueue(skb, q->qdisc))
-				sch->stats.drops++;
+				sch->qstats.drops++;
 		}
 
 		skb = q->qdisc->dequeue(q->qdisc);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 6961f081170f..a8840e7d44ab 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -107,8 +107,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		goto dropped;
 
 	if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
-		sch->stats.bytes += skb->len;
-		sch->stats.packets++;
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -117,10 +117,10 @@ dropped:
 #ifdef CONFIG_NET_CLS_ACT
 	if (NET_XMIT_DROP == ret) {
 #endif
-		sch->stats.drops++;
+		sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_ACT
 	} else {
-		sch->stats.overlimits++; /* abuse, but noone uses it */
+		sch->qstats.overlimits++; /* abuse, but noone uses it */
 	}
 #endif
 	return ret;
@@ -142,7 +142,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		return 0;
 	}
 dropped:
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index e98f79bdb435..4d33ff926d03 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -228,13 +228,13 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 			q->qave >>= 1;
 		}
 	} else {
-		q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
+		q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
 
 		/* NOTE:
 		   q->qave is fixed point number with point at Wlog.
 		   The formulae above is equvalent to floating point
 		   version:
 
-		   qave = qave*(1-W) + sch->stats.backlog*W;
+		   qave = qave*(1-W) + sch->qstats.backlog*W;
 
 		   --ANK (980924)
 		 */
 	}
@@ -242,22 +242,22 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	if (q->qave < q->qth_min) {
 		q->qcount = -1;
 enqueue:
-		if (sch->stats.backlog + skb->len <= q->limit) {
+		if (sch->qstats.backlog + skb->len <= q->limit) {
 			__skb_queue_tail(&sch->q, skb);
-			sch->stats.backlog += skb->len;
-			sch->stats.bytes += skb->len;
-			sch->stats.packets++;
+			sch->qstats.backlog += skb->len;
+			sch->bstats.bytes += skb->len;
+			sch->bstats.packets++;
 			return NET_XMIT_SUCCESS;
 		} else {
 			q->st.pdrop++;
 		}
 		kfree_skb(skb);
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return NET_XMIT_DROP;
 	}
 	if (q->qave >= q->qth_max) {
 		q->qcount = -1;
-		sch->stats.overlimits++;
+		sch->qstats.overlimits++;
 mark:
 		if (!(q->flags&TC_RED_ECN) || !red_ecn_mark(skb)) {
 			q->st.early++;
@@ -288,7 +288,7 @@ mark:
 			goto enqueue;
 		q->qcount = 0;
 		q->qR = net_random()&q->Rmask;
-		sch->stats.overlimits++;
+		sch->qstats.overlimits++;
 		goto mark;
 	}
 	q->qR = net_random()&q->Rmask;
@@ -296,7 +296,7 @@ mark:
 
 drop:
 	kfree_skb(skb);
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	return NET_XMIT_CN;
 }
@@ -308,7 +308,7 @@ red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	PSCHED_SET_PASTPERFECT(q->qidlestart);
 
 	__skb_queue_head(&sch->q, skb);
-	sch->stats.backlog += skb->len;
+	sch->qstats.backlog += skb->len;
 	return 0;
 }
@@ -320,7 +320,7 @@ red_dequeue(struct Qdisc* sch)
 
 	skb = __skb_dequeue(&sch->q);
 	if (skb) {
-		sch->stats.backlog -= skb->len;
+		sch->qstats.backlog -= skb->len;
 		return skb;
 	}
 	PSCHED_GET_TIME(q->qidlestart);
@@ -335,8 +335,8 @@ static unsigned int red_drop(struct Qdisc* sch)
 	skb = __skb_dequeue_tail(&sch->q);
 	if (skb) {
 		unsigned int len = skb->len;
-		sch->stats.backlog -= len;
-		sch->stats.drops++;
+		sch->qstats.backlog -= len;
+		sch->qstats.drops++;
 		q->st.other++;
 		kfree_skb(skb);
 		return len;
@@ -350,7 +350,7 @@ static void red_reset(struct Qdisc* sch)
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	__skb_queue_purge(&sch->q);
-	sch->stats.backlog = 0;
+	sch->qstats.backlog = 0;
 	PSCHED_SET_PASTPERFECT(q->qidlestart);
 	q->qave = 0;
 	q->qcount = -1;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index ac668b08ebe9..aa5607304782 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -227,7 +227,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 		kfree_skb(skb);
 		sfq_dec(q, x);
 		sch->q.qlen--;
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return len;
 	}
@@ -243,7 +243,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 		sfq_dec(q, d);
 		sch->q.qlen--;
 		q->ht[q->hash[d]] = SFQ_DEPTH;
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return len;
 	}
@@ -276,8 +276,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		}
 	}
 	if (++sch->q.qlen < q->limit-1) {
-		sch->stats.bytes += skb->len;
-		sch->stats.packets++;
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
 		return 0;
 	}
@@ -313,7 +313,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	if (++sch->q.qlen < q->limit - 1)
 		return 0;
 
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	sfq_drop(sch);
 	return NET_XMIT_CN;
 }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index a1df30471451..f3b1b883488f 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -141,7 +141,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	int ret;
 
 	if (skb->len > q->max_size) {
-		sch->stats.drops++;
+		sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_POLICE
 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 #endif
@@ -151,13 +151,13 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 
 	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
-		sch->stats.drops++;
+		sch->qstats.drops++;
 		return ret;
 	}
 
 	sch->q.qlen++;
-	sch->stats.bytes += skb->len;
-	sch->stats.packets++;
+	sch->bstats.bytes += skb->len;
+	sch->bstats.packets++;
 	return 0;
 }
@@ -179,7 +179,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
 	if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
 		sch->q.qlen--;
-		sch->stats.drops++;
+		sch->qstats.drops++;
 	}
 	return len;
 }
@@ -250,11 +250,11 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
 				/* When requeue fails skb is dropped */
 				sch->q.qlen--;
-				sch->stats.drops++;
+				sch->qstats.drops++;
 			}
 
 			sch->flags |= TCQ_F_THROTTLED;
-			sch->stats.overlimits++;
+			sch->qstats.overlimits++;
 	}
 	return NULL;
 }
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index dd169094aa2e..9e7c06dca9e7 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -96,14 +96,14 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	__skb_queue_tail(&q->q, skb);
 	if (q->q.qlen <= dev->tx_queue_len) {
-		sch->stats.bytes += skb->len;
-		sch->stats.packets++;
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
 		return 0;
 	}
 
 	__skb_unlink(skb, &q->q);
 	kfree_skb(skb);
-	sch->stats.drops++;
+	sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
```
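The mechanical rule applied across all fourteen qdiscs above is the same: byte and packet throughput accounting moves to `bstats`, while drops, overlimits, and backlog move to `qstats`. A hypothetical enqueue function showing the resulting pattern (not part of the patch; `limit` is an assumed per-qdisc field):

```c
/* Hypothetical qdisc enqueue illustrating the post-patch accounting
 * split; "limit" is an assumed field, not taken from the patch. */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (sch->q.qlen < limit) {
		__skb_queue_tail(&sch->q, skb);	/* also bumps sch->q.qlen */
		sch->bstats.bytes += skb->len;	/* throughput -> gnet_stats_basic */
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}
	sch->qstats.drops++;			/* queue state -> gnet_stats_queue */
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
```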
