/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal or an external classifier) on flows.
 * This is a stochastic model (as we use a hash, several flows
 *                             might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO)
 * and drops happen at the head only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

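/* Hash an skb to one of flows_cnt buckets. The hash is salted with a
 * per-qdisc random perturbation so that flow placement cannot be
 * predicted from outside.
 */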
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}

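/* Map an skb to a flow index in [1, flows_cnt]; 0 means drop.
 * skb->priority may select a class directly, then the external filter
 * chain (if configured) is consulted, and the perturbed hash is the
 * fallback.
 */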
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
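			/* fall through */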
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	sch->q.qlen--;
	qdisc_qstats_drop(sch);
	qdisc_qstats_backlog_dec(sch, skb);
	kfree_skb(skb);
	flow->dropped++;
	return idx;
}

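/* Qdisc ->drop() hook: drop one packet from the fattest flow and
 * report the number of bytes freed.
 */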
static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
{
	unsigned int prev_backlog;

	prev_backlog = sch->qstats.backlog;
	fq_codel_drop(sch);
	return prev_backlog - sch->qstats.backlog;
}

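/* Enqueue: classify the packet onto a flow, timestamp it for CoDel and
 * tail-add it to that flow's queue. A flow becoming active gets a full
 * quantum of credit and priority via new_flows. If the qdisc limit is
 * exceeded, one packet is dropped from the fattest flow instead.
 */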
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, let the upper stack know about it. */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled by
 * CoDel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}

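/* Dequeue: deficit round robin over new_flows then old_flows. A flow
 * whose deficit is exhausted is refilled with one quantum and moved to
 * the tail of old_flows; otherwise CoDel decides whether the head
 * packet of that flow is delivered, ECN-marked or dropped.
 */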
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it to the next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

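/* Free every queued packet and return all flows and their CoDel state
 * to initial values.
 */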
static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		while (flow->head) {
			struct sk_buff *skb = dequeue_head(flow);

			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}

		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

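/* Parse netlink attributes and apply them under the qdisc tree lock.
 * TCA_FQ_CODEL_FLOWS is only accepted before the flow table exists,
 * and times are converted from microseconds to CoDel's internal
 * fixed-point representation.
 */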
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

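/* The flow table can be large (flows_cnt entries): try kzalloc first
 * and fall back to vzalloc when contiguous pages are not available.
 */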
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

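/* Set the defaults (10240 packet limit, 1024 flows, quantum of one
 * MTU, ECN enabled), apply any netlink overrides, then allocate the
 * flow and backlog tables.
 */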
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams, sch);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

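/* Dump the current configuration back to user space. */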
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

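/* Export qdisc-level statistics, including the current lengths of the
 * new and old flow lists.
 */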
static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

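/* Flows are not real classes, so most of the class operations below
 * are stubs that mainly exist to let tc filters attach to the qdisc.
 */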
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

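/* Per-flow statistics: walk the flow's queue to report its length and
 * expose CoDel state (deficit, delay, drop schedule) as xstats.
 */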
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

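/* Walk the flows that are currently linked on a round-robin list,
 * presenting each as class index + 1.
 */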
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_qdisc_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");