Code Review
/
kvmfornfv.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
Upgrade to 4.4.50-rt62
[kvmfornfv.git]
/
kernel
/
net
/
sched
/
sch_fq_codel.c
diff --git a/kernel/net/sched/sch_fq_codel.c b/kernel/net/sched/sch_fq_codel.c
index 4c834e9..d3fc8f9 100644 (file)
--- a/kernel/net/sched/sch_fq_codel.c
+++ b/kernel/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
-	unsigned int idx;
+	unsigned int idx, prev_backlog;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
struct fq_codel_flow *flow;
int uninitialized_var(ret);
@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
+ prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet
* from this flow.
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet
* from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
return NET_XMIT_CN;
/* As we dropped a packet, better let upper stack know this */
-	qdisc_tree_decrease_qlen(sch, 1);
+	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}
return NET_XMIT_SUCCESS;
}
@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
struct fq_codel_flow *flow;
struct list_head *head;
u32 prev_drop_count, prev_ecn_mark;
struct fq_codel_flow *flow;
struct list_head *head;
u32 prev_drop_count, prev_ecn_mark;
+ unsigned int prev_backlog;
begin:
head = &q->new_flows;
begin:
head = &q->new_flows;
@@ -259,6 +261,7 @@ begin:
prev_drop_count = q->cstats.drop_count;
prev_ecn_mark = q->cstats.ecn_mark;
prev_drop_count = q->cstats.drop_count;
prev_ecn_mark = q->cstats.ecn_mark;
+ prev_backlog = sch->qstats.backlog;
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
dequeue);
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
dequeue);
@@ -276,12 +279,14 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->cstats.drop_count && sch->q.qlen) {
* or HTB crashes. Defer it for next round.
*/
if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ q->cstats.drop_len);
q->cstats.drop_count = 0;
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
}
return skb;
}
}
return skb;
}
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_codel_dequeue(sch);
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_codel_dequeue(sch);
+ q->cstats.drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
q->cstats.drop_count++;
}
kfree_skb(skb);
q->cstats.drop_count++;
}
-	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
q->cstats.drop_count = 0;
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
sch_tree_unlock(sch);
return 0;
sch_tree_unlock(sch);
return 0;