Diffstat (limited to 'net/sched/sch_cake.c')
-rw-r--r--  net/sched/sch_cake.c | 77
1 file changed, 40 insertions, 37 deletions
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 32bacfc314c2..4a64d6397b6f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1398,15 +1398,15 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
const struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int hdr_len, last_len = 0;
u32 off = skb_network_offset(skb);
+ u16 segs = qdisc_pkt_segs(skb);
u32 len = qdisc_pkt_len(skb);
- u16 segs = 1;
q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
- if (!shinfo->gso_size)
+ if (segs == 1)
return cake_calc_overhead(q, len, off);
- /* borrowed from qdisc_pkt_len_init() */
+ /* borrowed from qdisc_pkt_len_segs_init() */
if (!skb->encapsulation)
hdr_len = skb_transport_offset(skb);
else
@@ -1430,12 +1430,6 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
hdr_len += sizeof(struct udphdr);
}
- if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
- segs = DIV_ROUND_UP(skb->len - hdr_len,
- shinfo->gso_size);
- else
- segs = shinfo->gso_segs;
-
len = shinfo->gso_size + hdr_len;
last_len = skb->len - shinfo->gso_size * (segs - 1);
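
The hunk above stops deriving the segment count locally from shinfo->gso_segs (or from DIV_ROUND_UP() for SKB_GSO_DODGY packets) and instead trusts the value returned by qdisc_pkt_segs(); the per-segment length arithmetic for len and last_len is unchanged. A minimal, self-contained userspace model of that arithmetic (example values invented, not the in-tree code):

/* Userspace model of the per-segment sizing above; not kernel code. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int skb_len  = 30000;	/* total length of the GSO super-packet */
	unsigned int hdr_len  = 66;	/* MAC + IP + TCP headers               */
	unsigned int gso_size = 1448;	/* payload carried per segment          */

	/*
	 * Old fallback for SKB_GSO_DODGY: recompute the segment count from
	 * the payload; the patch instead reads the count cached by
	 * qdisc_pkt_segs().
	 */
	unsigned int segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);

	/* Wire length of a full segment, and of the (shorter) final one. */
	unsigned int len      = gso_size + hdr_len;
	unsigned int last_len = skb_len - gso_size * (segs - 1);

	printf("segs=%u full=%u last=%u\n", segs, len, last_len);
	return 0;
}
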
@@ -1597,7 +1591,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
sch->q.qlen--;
- qdisc_tree_reduce_backlog(sch, 1, len);
cake_heapify(q, 0);
@@ -1743,14 +1736,14 @@ static void cake_reconfigure(struct Qdisc *sch);
static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ u32 idx, tin, prev_qlen, prev_backlog, drop_id;
struct cake_sched_data *q = qdisc_priv(sch);
- int len = qdisc_pkt_len(skb);
- int ret;
+ int len = qdisc_pkt_len(skb), ret;
struct sk_buff *ack = NULL;
ktime_t now = ktime_get();
struct cake_tin_data *b;
struct cake_flow *flow;
- u32 idx, tin;
+ bool same_flow = false;
/* choose flow to insert into */
idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@@ -1788,7 +1781,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
- if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+ if (qdisc_pkt_segs(skb) > 1 && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
unsigned int slen = 0, numsegs = 0;
@@ -1800,6 +1793,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_segs = 1;
cobalt_set_enqueue_time(segs, now);
get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
segs);
@@ -1823,6 +1817,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
consume_skb(skb);
} else {
/* not splitting */
+ int ack_pkt_len = 0;
+
cobalt_set_enqueue_time(skb, now);
get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
flow_queue_add(flow, skb);
@@ -1833,13 +1829,13 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (ack) {
b->ack_drops++;
sch->qstats.drops++;
- b->bytes += qdisc_pkt_len(ack);
- len -= qdisc_pkt_len(ack);
+ ack_pkt_len = qdisc_pkt_len(ack);
+ b->bytes += ack_pkt_len;
q->buffer_used += skb->truesize - ack->truesize;
if (q->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, ack, now, true);
- qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+ qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
consume_skb(ack);
} else {
sch->q.qlen++;
@@ -1848,11 +1844,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* stats */
b->packets++;
- b->bytes += len;
- b->backlogs[idx] += len;
- b->tin_backlog += len;
- sch->qstats.backlog += len;
- q->avg_window_bytes += len;
+ b->bytes += len - ack_pkt_len;
+ b->backlogs[idx] += len - ack_pkt_len;
+ b->tin_backlog += len - ack_pkt_len;
+ sch->qstats.backlog += len - ack_pkt_len;
+ q->avg_window_bytes += len - ack_pkt_len;
}
if (q->overflow_timeout)
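
In the ACK-filter branch above, the net accounting is unchanged: the dropped ACK's length is captured once in ack_pkt_len, and the stats updates add len - ack_pkt_len instead of shrinking len itself, so len still holds the new packet's full size for the overflow handling further down in cake_enqueue(). A small userspace sketch (invented values) showing that the old and new formulations yield the same counters:

/* Userspace sketch; field names loosely follow the hunk, values invented. */
#include <assert.h>
#include <stdio.h>

struct tin_stats {
	unsigned long long bytes;	/* total bytes accepted           */
	unsigned int backlog;		/* bytes currently sitting queued */
};

/* Pre-patch shape: len itself is shrunk by the filtered ACK's length. */
static void stats_old(struct tin_stats *b, unsigned int len, unsigned int ack_len)
{
	b->bytes += ack_len;
	len -= ack_len;
	b->bytes += len;
	b->backlog += len;
}

/* Post-patch shape: len stays intact (the overflow path still needs the
 * full value); the subtraction moves into the individual stats updates.
 */
static void stats_new(struct tin_stats *b, unsigned int len, unsigned int ack_len)
{
	b->bytes += ack_len;
	b->bytes += len - ack_len;
	b->backlog += len - ack_len;
}

int main(void)
{
	struct tin_stats a = { 0, 0 }, b = { 0, 0 };

	stats_old(&a, 1514, 66);	/* new data packet, one ACK filtered */
	stats_new(&b, 1514, 66);
	assert(a.bytes == b.bytes && a.backlog == b.backlog);
	printf("bytes=%llu backlog=%u\n", b.bytes, b.backlog);
	return 0;
}
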
@@ -1927,24 +1923,29 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (q->buffer_used > q->buffer_max_used)
q->buffer_max_used = q->buffer_used;
- if (q->buffer_used > q->buffer_limit) {
- bool same_flow = false;
- u32 dropped = 0;
- u32 drop_id;
+ if (q->buffer_used <= q->buffer_limit)
+ return NET_XMIT_SUCCESS;
- while (q->buffer_used > q->buffer_limit) {
- dropped++;
- drop_id = cake_drop(sch, to_free);
+ prev_qlen = sch->q.qlen;
+ prev_backlog = sch->qstats.backlog;
- if ((drop_id >> 16) == tin &&
- (drop_id & 0xFFFF) == idx)
- same_flow = true;
- }
- b->drop_overlimit += dropped;
+ while (q->buffer_used > q->buffer_limit) {
+ drop_id = cake_drop(sch, to_free);
+ if ((drop_id >> 16) == tin &&
+ (drop_id & 0xFFFF) == idx)
+ same_flow = true;
+ }
+
+ prev_qlen -= sch->q.qlen;
+ prev_backlog -= sch->qstats.backlog;
+ b->drop_overlimit += prev_qlen;
- if (same_flow)
- return NET_XMIT_CN;
+ if (same_flow) {
+ qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+ prev_backlog - len);
+ return NET_XMIT_CN;
}
+ qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
return NET_XMIT_SUCCESS;
}
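
Together with the earlier removal of qdisc_tree_reduce_backlog() from cake_drop(), the overflow path now snapshots sch->q.qlen and sch->qstats.backlog, drops packets until the buffer limit is respected, and reports the whole delta to the parent qdiscs in a single call; when the freshly enqueued packet was itself among the drops (same_flow), one packet and len bytes are excluded because NET_XMIT_CN already tells the caller not to account for it. A standalone model of that snapshot-and-delta pattern (fake queue, made-up sizes):

/* Standalone model of the snapshot-and-delta drop accounting; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_qdisc {
	unsigned int qlen;	/* packets queued */
	unsigned int backlog;	/* bytes queued   */
};

/* Stand-in for cake_drop(): evicts one packet and returns its length. */
static unsigned int drop_one(struct fake_qdisc *q)
{
	unsigned int len = 1514;

	q->qlen--;
	q->backlog -= len;
	return len;
}

int main(void)
{
	struct fake_qdisc q = { .qlen = 100, .backlog = 151400 };
	unsigned int len = 1514;	/* the packet we just enqueued        */
	bool same_flow = true;		/* pretend it was evicted again below */

	unsigned int prev_qlen = q.qlen;
	unsigned int prev_backlog = q.backlog;

	for (int i = 0; i < 3; i++)	/* stand-in for the buffer_limit loop */
		drop_one(&q);

	/* Deltas since the snapshot: what the parent qdiscs must forget. */
	prev_qlen -= q.qlen;
	prev_backlog -= q.backlog;

	if (same_flow)	/* NET_XMIT_CN means the caller never counts this packet */
		printf("reduce by %u pkts / %u bytes\n",
		       prev_qlen - 1, prev_backlog - len);
	else
		printf("reduce by %u pkts / %u bytes\n",
		       prev_qlen, prev_backlog);
	return 0;
}
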
@@ -2188,7 +2189,7 @@ retry:
b->tin_dropped++;
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
- kfree_skb_reason(skb, reason);
+ qdisc_dequeue_drop(sch, skb, reason);
if (q->rate_flags & CAKE_FLAG_INGRESS)
goto retry;
}
@@ -2729,6 +2730,8 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
int i, j, err;
sch->limit = 10240;
+ sch->flags |= TCQ_F_DEQUEUE_DROPS;
+
q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
q->flow_mode = CAKE_FLOW_TRIPLE;
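
The last two hunks pair up: cake_init() opts in with TCQ_F_DEQUEUE_DROPS, and the dequeue path hands packets it decides to drop to qdisc_dequeue_drop() instead of freeing them directly with kfree_skb_reason(). A rough kernel-style sketch of that pairing; only the flag and the three-argument helper call are taken from this diff, everything else (including the drop-decision helper) is invented for illustration and is not the in-tree implementation:

/* Rough kernel-style sketch, not buildable on its own and not the in-tree
 * code: opt in at init time, then route dequeue-time drops through
 * qdisc_dequeue_drop() so the core can handle the frees.
 */
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

static bool example_should_drop(const struct sk_buff *skb);	/* hypothetical AQM check */

static int example_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	sch->flags |= TCQ_F_DEQUEUE_DROPS;	/* as cake_init() now does */
	return 0;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;

retry:
	skb = qdisc_dequeue_head(sch);		/* simple FIFO stand-in */
	if (!skb)
		return NULL;

	if (unlikely(example_should_drop(skb))) {
		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
		qdisc_qstats_drop(sch);
		/* replaces kfree_skb_reason(skb, reason), as in the hunk above */
		qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_OVERLIMIT);
		goto retry;
	}
	return skb;
}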