From: WANG Cong <xiyou.wangcong@gmail.com>
Date: Thu, 25 Feb 2016 14:55:01 -0800
Subject: [PATCH] net_sched: update hierarchical backlog too

When the bottom qdisc decides to, for example, drop some packet,
it calls qdisc_tree_decrease_qlen() to update the queue length
for all its ancestors; we need to update the backlog too to
keep the stats on the root qdisc accurate.

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

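A minimal illustrative sketch (not part of the patch itself): a hypothetical leaf
qdisc trimming its queue after a limit change, modeled on the sch_codel.c and
sch_pie.c hunks below. The function name example_change_limit and the new_limit
parameter are invented for illustration; the point is that the byte total of the
dropped packets is accumulated alongside the packet count and both are handed to
the renamed helper, so ancestor qdiscs keep qlen and backlog consistent.

/* Hypothetical example, not part of this patch: trim a leaf qdisc's queue
 * to a new limit and report both packet and byte counts to the ancestors
 * via the new qdisc_tree_reduce_backlog() helper.
 */
static int example_change_limit(struct Qdisc *sch, u32 new_limit)
{
        unsigned int qlen = sch->q.qlen;        /* packets queued before trimming */
        unsigned int dropped = 0;               /* bytes dropped while trimming */

        sch->limit = new_limit;
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __skb_dequeue(&sch->q);

                dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_drop(skb, sch);
        }
        /* was: qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); */
        qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
        return 0;
}

Every hunk below follows this pattern: compute (or accumulate) the byte count
alongside the packet count, then pass both to qdisc_tree_reduce_backlog().
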
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -162,12 +162,14 @@ struct codel_vars {
  * struct codel_stats - contains codel shared variables and stats
  * @maxpacket: largest packet we've seen so far
  * @drop_count:        temp count of dropped packets in dequeue()
+ * @drop_len:  bytes of dropped packets in dequeue()
  * ecn_mark:   number of packets we ECN marked instead of dropping
  * ce_mark:    number of packets CE marked because sojourn time was above ce_threshold
  */
 struct codel_stats {
        u32             maxpacket;
        u32             drop_count;
+       u32             drop_len;
        u32             ecn_mark;
        u32             ce_mark;
 };
@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(str
                                                                  vars->rec_inv_sqrt);
                                        goto end;
                                }
+                               stats->drop_len += qdisc_pkt_len(skb);
                                qdisc_drop(skb, sch);
                                stats->drop_count++;
                                skb = dequeue_func(vars, sch);
@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(str
                if (params->ecn && INET_ECN_set_ce(skb)) {
                        stats->ecn_mark++;
                } else {
+                       stats->drop_len += qdisc_pkt_len(skb);
                        qdisc_drop(skb, sch);
                        stats->drop_count++;
 
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct net
                              struct Qdisc *qdisc);
 void qdisc_reset(struct Qdisc *qdisc);
 void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+                              unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -707,7 +708,7 @@ static inline struct Qdisc *qdisc_replac
        old = *pold;
        *pold = new;
        if (old != NULL) {
-               qdisc_tree_decrease_qlen(old, old->q.qlen);
+               qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
                qdisc_reset(old);
        }
        sch_tree_unlock(sch);
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net
        return 0;
 }
 
-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
+                              unsigned int len)
 {
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;
        int drops;
 
-       if (n == 0)
+       if (n == 0 && len == 0)
                return;
        drops = max_t(int, n, 0);
        rcu_read_lock();
@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdi
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
+               sch->qstats.backlog -= len;
                __qdisc_qstats_drop(sch, drops);
        }
        rcu_read_unlock();
 }
-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
 
 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
                               struct nlmsghdr *n, u32 clid,
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1909,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch,
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
-       unsigned int qlen;
+       unsigned int qlen, backlog;
 
        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;
@@ -1917,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch,
        sch_tree_lock(sch);
 
        qlen = cl->q->q.qlen;
+       backlog = cl->q->qstats.backlog;
        qdisc_reset(cl->q);
-       qdisc_tree_decrease_qlen(cl->q, qlen);
+       qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
 
        if (cl->next_alive)
                cbq_deactivate_class(cl);
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdi
                choke_zap_tail_holes(q);
 
        qdisc_qstats_backlog_dec(sch, skb);
+       qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
        qdisc_drop(skb, sch);
-       qdisc_tree_decrease_qlen(sch, 1);
        --sch->q.qlen;
 }
 
@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sc
                old = q->tab;
                if (old) {
                        unsigned int oqlen = sch->q.qlen, tail = 0;
+                       unsigned dropped = 0;
 
                        while (q->head != q->tail) {
                                struct sk_buff *skb = q->tab[q->head];
@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sc
                                        ntab[tail++] = skb;
                                        continue;
                                }
+                               dropped += qdisc_pkt_len(skb);
                                qdisc_qstats_backlog_dec(sch, skb);
                                --sch->q.qlen;
                                qdisc_drop(skb, sch);
                        }
-                       qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+                       qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
                        q->head = 0;
                        q->tail = tail;
                }
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_deque
 
        skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
 
-       /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+       /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->stats.drop_count && sch->q.qlen) {
-               qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+               qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
                q->stats.drop_count = 0;
+               q->stats.drop_len = 0;
        }
        if (skb)
                qdisc_bstats_update(sch, skb);
@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sc
 {
        struct codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CODEL_MAX + 1];
-       unsigned int qlen;
+       unsigned int qlen, dropped = 0;
        int err;
 
        if (!opt)
@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sc
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __skb_dequeue(&sch->q);
 
+               dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_drop(skb, sch);
        }
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
 
        sch_tree_unlock(sch);
        return 0;
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(
 static void drr_purge_queue(struct drr_class *cl)
 {
        unsigned int len = cl->qdisc->q.qlen;
+       unsigned int backlog = cl->qdisc->qstats.backlog;
 
        qdisc_reset(cl->qdisc);
-       qdisc_tree_decrease_qlen(cl->qdisc, len);
+       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }
 
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch,
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
+       unsigned drop_len = 0;
        u32 fq_log;
 
        if (!opt)
@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch,
 
                if (!skb)
                        break;
+               drop_len += qdisc_pkt_len(skb);
                kfree_skb(skb);
                drop_count++;
        }
-       qdisc_tree_decrease_qlen(sch, drop_count);
+       qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
 
        sch_tree_unlock(sch);
        return err;
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
-       unsigned int idx;
+       unsigned int idx, prev_backlog;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
 
@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_bu
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
+       prev_backlog = sch->qstats.backlog;
        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_bu
                return NET_XMIT_CN;
 
        /* As we dropped a packet, better let upper stack know this */
-       qdisc_tree_decrease_qlen(sch, 1);
+       qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_SUCCESS;
 }
 
@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;
+       unsigned int prev_backlog;
 
 begin:
        head = &q->new_flows;
@@ -259,6 +261,7 @@ begin:
 
        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;
+       prev_backlog = sch->qstats.backlog;
 
        skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
                            dequeue);
@@ -276,12 +279,14 @@ begin:
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
-       /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+       /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
        * or HTB crashes. Defer it for next round.
        */
        if (q->cstats.drop_count && sch->q.qlen) {
-               qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+               qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+                                         q->cstats.drop_len);
                q->cstats.drop_count = 0;
+               q->cstats.drop_len = 0;
        }
        return skb;
 }
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);
 
+               q->cstats.drop_len += qdisc_pkt_len(skb);
                kfree_skb(skb);
                q->cstats.drop_count++;
        }
-       qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+       qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
+       q->cstats.drop_len = 0;
 
        sch_tree_unlock(sch);
        return 0;
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -895,9 +895,10 @@ static void
 hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
 {
        unsigned int len = cl->qdisc->q.qlen;
+       unsigned int backlog = cl->qdisc->qstats.backlog;
 
        qdisc_reset(cl->qdisc);
-       qdisc_tree_decrease_qlen(cl->qdisc, len);
+       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }
 
 static void
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *s
        struct hhf_sched_data *q = qdisc_priv(sch);
        enum wdrr_bucket_idx idx;
        struct wdrr_bucket *bucket;
+       unsigned int prev_backlog;
 
        idx = hhf_classify(skb, sch);
 
@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *s
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
+       prev_backlog = sch->qstats.backlog;
        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet from this
         * bucket.
@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *s
                return NET_XMIT_CN;
 
        /* As we dropped a packet, better let upper stack know this. */
-       qdisc_tree_decrease_qlen(sch, 1);
+       qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_SUCCESS;
 }
 
@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch,
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HHF_MAX + 1];
-       unsigned int qlen;
+       unsigned int qlen, prev_backlog;
        int err;
        u64 non_hh_quantum;
        u32 new_quantum = q->quantum;
@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch,
        }
 
        qlen = sch->q.qlen;
+       prev_backlog = sch->qstats.backlog;
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = hhf_dequeue(sch);
 
                kfree_skb(skb);
        }
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+                                 prev_backlog - sch->qstats.backlog);
 
        sch_tree_unlock(sch);
        return 0;
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1265,7 +1265,6 @@ static int htb_delete(struct Qdisc *sch,
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
-       unsigned int qlen;
        struct Qdisc *new_q = NULL;
        int last_child = 0;
 
@@ -1285,9 +1284,11 @@ static int htb_delete(struct Qdisc *sch,
        sch_tree_lock(sch);
 
        if (!cl->level) {
-               qlen = cl->un.leaf.q->q.qlen;
+               unsigned int qlen = cl->un.leaf.q->q.qlen;
+               unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+
                qdisc_reset(cl->un.leaf.q);
-               qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+               qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
        }
 
        /* delete from hash and active; remainder in destroy_class */
@@ -1421,10 +1422,11 @@ static int htb_change_class(struct Qdisc
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        unsigned int qlen = parent->un.leaf.q->q.qlen;
+                       unsigned int backlog = parent->un.leaf.q->qstats.backlog;
 
                        /* turn parent into inner node */
                        qdisc_reset(parent->un.leaf.q);
-                       qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
+                       qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
                        qdisc_destroy(parent->un.leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch
                if (q->queues[i] != &noop_qdisc) {
                        struct Qdisc *child = q->queues[i];
                        q->queues[i] = &noop_qdisc;
-                       qdisc_tree_decrease_qlen(child, child->q.qlen);
+                       qdisc_tree_reduce_backlog(child, child->q.qlen,
+                                                 child->qstats.backlog);
                        qdisc_destroy(child);
                }
        }
@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch
                                q->queues[i] = child;
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_decrease_qlen(old,
-                                                                old->q.qlen);
+                                       qdisc_tree_reduce_backlog(old,
+                                                                 old->q.qlen,
+                                                                 old->qstats.backlog);
                                        qdisc_destroy(old);
                                }
                                sch_tree_unlock(sch);
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -598,7 +598,8 @@ deliver:
                                if (unlikely(err != NET_XMIT_SUCCESS)) {
                                        if (net_xmit_drop_count(err)) {
                                                qdisc_qstats_drop(sch);
-                                               qdisc_tree_decrease_qlen(sch, 1);
+                                               qdisc_tree_reduce_backlog(sch, 1,
+                                                                         qdisc_pkt_len(skb));
                                        }
                                }
                                goto tfifo_dequeue;
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch,
 {
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_PIE_MAX + 1];
-       unsigned int qlen;
+       unsigned int qlen, dropped = 0;
        int err;
 
        if (!opt)
@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch,
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __skb_dequeue(&sch->q);
 
+               dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_drop(skb, sch);
        }
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
 
        sch_tree_unlock(sch);
        return 0;
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch,
                struct Qdisc *child = q->queues[i];
                q->queues[i] = &noop_qdisc;
                if (child != &noop_qdisc) {
-                       qdisc_tree_decrease_qlen(child, child->q.qlen);
+                       qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
                        qdisc_destroy(child);
                }
        }
@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch,
                                q->queues[i] = child;
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_decrease_qlen(old,
-                                                                old->q.qlen);
+                                       qdisc_tree_reduce_backlog(old,
+                                                                 old->q.qlen,
+                                                                 old->qstats.backlog);
                                        qdisc_destroy(old);
                                }
                                sch_tree_unlock(sch);
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(
 static void qfq_purge_queue(struct qfq_class *cl)
 {
        unsigned int len = cl->qdisc->q.qlen;
+       unsigned int backlog = cl->qdisc->qstats.backlog;
 
        qdisc_reset(cl->qdisc);
-       qdisc_tree_decrease_qlen(cl->qdisc, len);
+       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
 }
 
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch,
        q->flags = ctl->flags;
        q->limit = ctl->limit;
        if (child) {
-               qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+                                         q->qdisc->qstats.backlog);
                qdisc_destroy(q->qdisc);
                q->qdisc = child;
        }
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch,
 
        sch_tree_lock(sch);
 
-       qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+       qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+                                 q->qdisc->qstats.backlog);
        qdisc_destroy(q->qdisc);
        q->qdisc = child;
 
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -346,7 +346,7 @@ static int
 sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
-       unsigned int hash;
+       unsigned int hash, dropped;
        sfq_index x, qlen;
        struct sfq_slot *slot;
        int uninitialized_var(ret);
@@ -461,7 +461,7 @@ enqueue:
                return NET_XMIT_SUCCESS;
 
        qlen = slot->qlen;
-       sfq_drop(sch);
+       dropped = sfq_drop(sch);
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
         */
@@ -469,7 +469,7 @@ enqueue:
                return NET_XMIT_CN;
 
        /* As we dropped a packet, better let upper stack know this */
-       qdisc_tree_decrease_qlen(sch, 1);
+       qdisc_tree_reduce_backlog(sch, 1, dropped);
        return NET_XMIT_SUCCESS;
 }
 
@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch
        struct sfq_slot *slot;
        struct sk_buff_head list;
        int dropped = 0;
+       unsigned int drop_len = 0;
 
        __skb_queue_head_init(&list);
 
@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch
                        if (x >= SFQ_MAX_FLOWS) {
 drop:
                                qdisc_qstats_backlog_dec(sch, skb);
+                               drop_len += qdisc_pkt_len(skb);
                                kfree_skb(skb);
                                dropped++;
                                continue;
@@ -594,7 +596,7 @@ drop:
                }
        }
        sch->q.qlen -= dropped;
-       qdisc_tree_decrease_qlen(sch, dropped);
+       qdisc_tree_reduce_backlog(sch, dropped, drop_len);
 }
 
 static void sfq_perturbation(unsigned long arg)
@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch,
        struct sfq_sched_data *q = qdisc_priv(sch);
        struct tc_sfq_qopt *ctl = nla_data(opt);
        struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
-       unsigned int qlen;
+       unsigned int qlen, dropped = 0;
        struct red_parms *p = NULL;
 
        if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch,
 
        qlen = sch->q.qlen;
        while (sch->q.qlen > q->limit)
-               sfq_drop(sch);
-       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+               dropped += sfq_drop(sch);
+       qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
 
        del_timer(&q->perturb_timer);
        if (q->perturb_period) {
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *s
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct sk_buff *segs, *nskb;
        netdev_features_t features = netif_skb_features(skb);
+       unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
        int ret, nb;
 
        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *s
                nskb = segs->next;
                segs->next = NULL;
                qdisc_skb_cb(segs)->pkt_len = segs->len;
+               len += segs->len;
                ret = qdisc_enqueue(segs, q->qdisc);
                if (ret != NET_XMIT_SUCCESS) {
                        if (net_xmit_drop_count(ret))
@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *s
        }
        sch->q.qlen += nb;
        if (nb > 1)
-               qdisc_tree_decrease_qlen(sch, 1 - nb);
+               qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
        consume_skb(skb);
        return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }
@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch,
 
        sch_tree_lock(sch);
        if (child) {
-               qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+                                         q->qdisc->qstats.backlog);
                qdisc_destroy(q->qdisc);
                q->qdisc = child;
        }