From fecd715ef820d36e262c04ffba5d0d8968ce1ddb Mon Sep 17 00:00:00 2001
From: Felix Fietkau
Date: Tue, 7 Jun 2016 14:11:13 +0200
Subject: [PATCH] kernel: merge pending fq_codel backlog accounting fix

Signed-off-by: Felix Fietkau
---
 ...35-fq_codel-fix-NET_XMIT_CN-behavior.patch | 70 +++++++++++++++++++
 .../patches-4.4/660-fq_codel_defaults.patch   |  2 +-
 .../661-fq_codel_keep_dropped_stats.patch     |  2 +-
 .../662-use_fq_codel_by_default.patch         |  4 +-
 4 files changed, 74 insertions(+), 4 deletions(-)
 create mode 100644 target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch

diff --git a/target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch b/target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch
new file mode 100644
index 0000000000..a1902fea07
--- /dev/null
+++ b/target/linux/generic/patches-4.4/035-fq_codel-fix-NET_XMIT_CN-behavior.patch
@@ -0,0 +1,70 @@
+From: Eric Dumazet
+Date: Sat, 4 Jun 2016 12:55:13 -0700
+Subject: [PATCH] fq_codel: fix NET_XMIT_CN behavior
+
+My prior attempt to fix the backlogs of parents failed.
+
+If we return NET_XMIT_CN, our parents wont increase their backlog,
+so our qdisc_tree_reduce_backlog() should take this into account.
+
+v2: Florian Westphal pointed out that we could drop the packet,
+so we need to save qdisc_pkt_len(skb) in a temp variable before
+calling fq_codel_drop()
+
+Fixes: 9d18562a2278 ("fq_codel: add batch ability to fq_codel_drop()")
+Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
+Reported-by: Stas Nichiporovich
+Signed-off-by: Eric Dumazet
+Cc: WANG Cong
+Cc: Jamal Hadi Salim
+---
+
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -197,6 +197,7 @@ static int fq_codel_enqueue(struct sk_bu
+ 	unsigned int idx, prev_backlog, prev_qlen;
+ 	struct fq_codel_flow *flow;
+ 	int uninitialized_var(ret);
++	unsigned int pkt_len;
+ 	bool memory_limited;
+ 
+ 	idx = fq_codel_classify(skb, sch, &ret);
+@@ -228,6 +229,8 @@ static int fq_codel_enqueue(struct sk_bu
+ 	prev_backlog = sch->qstats.backlog;
+ 	prev_qlen = sch->q.qlen;
+ 
++	/* save this packet length as it might be dropped by fq_codel_drop() */
++	pkt_len = qdisc_pkt_len(skb);
+ 	/* fq_codel_drop() is quite expensive, as it performs a linear search
+ 	 * in q->backlogs[] to find a fat flow.
+ 	 * So instead of dropping a single packet, drop half of its backlog
+@@ -235,14 +238,23 @@ static int fq_codel_enqueue(struct sk_bu
+ 	 */
+ 	ret = fq_codel_drop(sch, q->drop_batch_size);
+ 
+-	q->drop_overlimit += prev_qlen - sch->q.qlen;
++	prev_qlen -= sch->q.qlen;
++	prev_backlog -= sch->qstats.backlog;
++	q->drop_overlimit += prev_qlen;
+ 	if (memory_limited)
+-		q->drop_overmemory += prev_qlen - sch->q.qlen;
+-	/* As we dropped packet(s), better let upper stack know this */
+-	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
+-				  prev_backlog - sch->qstats.backlog);
++		q->drop_overmemory += prev_qlen;
+ 
+-	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
++	/* As we dropped packet(s), better let upper stack know this.
++	 * If we dropped a packet for this flow, return NET_XMIT_CN,
++	 * but in this case, our parents wont increase their backlogs.
++	 */
++	if (ret == idx) {
++		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
++					  prev_backlog - pkt_len);
++		return NET_XMIT_CN;
++	}
++	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
++	return NET_XMIT_SUCCESS;
+ }
+ 
+ /* This is the specific function called from codel_dequeue()
diff --git a/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch b/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
index 048476ba48..46fceffcf1 100644
--- a/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
+++ b/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
@@ -1,6 +1,6 @@
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -459,7 +459,7 @@ static int fq_codel_init(struct Qdisc *s
+@@ -471,7 +471,7 @@ static int fq_codel_init(struct Qdisc *s
 	sch->limit = 10*1024;
 	q->flows_cnt = 1024;
diff --git a/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch b/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch
index 30907229ce..3cb950c0db 100644
--- a/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch
+++ b/target/linux/generic/patches-4.4/661-fq_codel_keep_dropped_stats.patch
@@ -1,6 +1,6 @@
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -218,7 +218,6 @@ static int fq_codel_enqueue(struct sk_bu
+@@ -219,7 +219,6 @@ static int fq_codel_enqueue(struct sk_bu
 	list_add_tail(&flow->flowchain, &q->new_flows);
 	q->new_flow_count++;
 	flow->deficit = q->quantum;
diff --git a/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch b/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
index a56dec330a..8952ab4e20 100644
--- a/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
+++ b/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
@@ -13,7 +13,7 @@ device, it has to decide which ones to send first, which ones to
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -676,7 +676,7 @@ static const struct Qdisc_class_ops fq_c
+@@ -688,7 +688,7 @@ static const struct Qdisc_class_ops fq_c
 	.walk		= fq_codel_walk,
 };
-@@ -692,6 +692,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
+@@ -704,6 +704,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
 	.dump_stats	= fq_codel_dump_stats,
 	.owner		= THIS_MODULE,
 };
-- 
2.25.1