author     Alex Shi <alex.shi@linaro.org>   2016-07-17 12:16:24 +0800
committer  Alex Shi <alex.shi@linaro.org>   2016-07-17 12:16:24 +0800
commit     fe4c808f845e2ff3a3fb6c66009ab69c8d01bf57 (patch)
tree       82ce8b6476c1c4c84ab05a49c94317f1ce8ac1b7 /net/sched/sch_hhf.c
parent     bbe8f0ee88a1eb4bb1e72e593b008faf387f30ac (diff)
parent     0ac0a856d986c1ab240753479f5e50fdfab82b14 (diff)
Merge tag 'v3.18.37' into linux-linaro-lsk-v3.18 (lsk-v3.18-16.07)
This is the 3.18.37 stable release
Diffstat (limited to 'net/sched/sch_hhf.c')
-rw-r--r--	net/sched/sch_hhf.c	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 15d3aabfe250..792c6f330f77 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -390,6 +390,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	enum wdrr_bucket_idx idx;
 	struct wdrr_bucket *bucket;
+	unsigned int prev_backlog;
 
 	idx = hhf_classify(skb, sch);
 
@@ -417,6 +418,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (++sch->q.qlen <= sch->limit)
 		return NET_XMIT_SUCCESS;
 
+	prev_backlog = sch->qstats.backlog;
 	q->drop_overlimit++;
 	/* Return Congestion Notification only if we dropped a packet from this
 	 * bucket.
@@ -425,7 +427,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return NET_XMIT_CN;
 
 	/* As we dropped a packet, better let upper stack know this. */
-	qdisc_tree_decrease_qlen(sch, 1);
+	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
 	return NET_XMIT_SUCCESS;
 }
 
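
The enqueue-side change follows one pattern: snapshot sch->qstats.backlog before the over-limit drop, then report both the packet count and the byte delta to ancestor qdiscs with qdisc_tree_reduce_backlog() instead of the packet-only qdisc_tree_decrease_qlen(). The byte delta matters because the packet HHF drops is not necessarily the one just enqueued. The sketch below is a plain userspace model of that bookkeeping, not kernel code; struct toy_qdisc, toy_drop(), and toy_enqueue() are invented for illustration.

/*
 * Userspace model of the enqueue-side accounting in this patch.  When the
 * queue is over its limit, *some* queued packet is dropped (not necessarily
 * the one just enqueued), so the amount reported upward is taken as a
 * backlog delta rather than the enqueued skb's length.  All names invented.
 */
#include <stdio.h>

#define TOY_MAX 16

struct toy_qdisc {
	unsigned int len[TOY_MAX];	/* FIFO of queued packet sizes */
	unsigned int qlen;		/* packets queued */
	unsigned int backlog;		/* bytes queued */
	unsigned int limit;		/* max packets */
};

/* Drop policy: remove the head packet (stands in for hhf_drop()). */
static void toy_drop(struct toy_qdisc *q)
{
	unsigned int i;

	q->backlog -= q->len[0];
	q->qlen--;
	for (i = 0; i < q->qlen; i++)
		q->len[i] = q->len[i + 1];
}

static void toy_enqueue(struct toy_qdisc *q, unsigned int len)
{
	unsigned int prev_backlog;

	q->len[q->qlen++] = len;
	q->backlog += len;
	if (q->qlen <= q->limit)
		return;

	/* Over limit: snapshot, drop, report the delta -- this mirrors
	 * qdisc_tree_reduce_backlog(sch, 1,
	 *                           prev_backlog - sch->qstats.backlog).
	 */
	prev_backlog = q->backlog;
	toy_drop(q);
	printf("tell parents: 1 packet, %u bytes gone\n",
	       prev_backlog - q->backlog);
}

int main(void)
{
	struct toy_qdisc q = { .qlen = 0, .backlog = 0, .limit = 2 };

	toy_enqueue(&q, 1500);
	toy_enqueue(&q, 200);
	toy_enqueue(&q, 60);	/* over limit: the 1500-byte head is dropped */
	return 0;
}
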
@@ -535,7 +537,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_HHF_MAX + 1];
-	unsigned int qlen;
+	unsigned int qlen, prev_backlog;
 	int err;
 	u64 non_hh_quantum;
 	u32 new_quantum = q->quantum;
@@ -585,12 +587,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
 	}
 
 	qlen = sch->q.qlen;
+	prev_backlog = sch->qstats.backlog;
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = hhf_dequeue(sch);
 
 		kfree_skb(skb);
 	}
-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+				  prev_backlog - sch->qstats.backlog);
 
 	sch_tree_unlock(sch);
 	return 0;
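
The hhf_change() hunk applies the same idea to limit changes: record the queue length and backlog before the trimming loop, drop packets until the queue fits the new limit, then make a single aggregated qdisc_tree_reduce_backlog() call for the whole batch. A rough continuation of the userspace sketch above (reusing the invented struct toy_qdisc and toy_drop()) might look like this:

/*
 * Continuation of the sketch above: trim the queue after a limit change
 * and report the whole batch to the parents in one call, as the
 * hhf_change() hunk does with
 * qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
 *                           prev_backlog - sch->qstats.backlog).
 */
static void toy_change_limit(struct toy_qdisc *q, unsigned int new_limit)
{
	unsigned int qlen = q->qlen;		/* like qlen = sch->q.qlen */
	unsigned int prev_backlog = q->backlog;	/* the snapshot added by the patch */

	q->limit = new_limit;
	while (q->qlen > q->limit)
		toy_drop(q);			/* like hhf_dequeue() + kfree_skb() */

	printf("tell parents: %u packets, %u bytes gone\n",
	       qlen - q->qlen, prev_backlog - q->backlog);
}

Reporting once after the loop means the ancestor qdiscs are updated a single time per reconfiguration rather than once per dropped packet, which is how the patched kernel code batches it as well.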