Upgrade to 4.4.50-rt62
[kvmfornfv.git] kernel/include/net/codel.h
#ifndef __NET_SCHED_CODEL_H
#define __NET_SCHED_CODEL_H

/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

/* Controlling Queue Delay (CoDel) algorithm
 * =========================================
 * Source: Kathleen Nichols and Van Jacobson
 * http://queue.acm.org/detail.cfm?id=2209336
 *
 * Implemented on Linux by Dave Taht and Eric Dumazet
 */


/* CoDel uses a 1024 nsec clock, encoded in u32
 * This gives a range of 2199 seconds, because of signed compares
 */
typedef u32 codel_time_t;
typedef s32 codel_tdiff_t;
#define CODEL_SHIFT 10
#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)

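/* Worked example: one codel time unit is 2^CODEL_SHIFT = 1024 ns, so the
 * signed 31-bit comparison window is 2^31 * 1024 ns ~= 2199 sec, and
 * MS2TIME(5) == (5 * 1000000) >> 10 == 4882 units, i.e. roughly 5 ms.
 */
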
static inline codel_time_t codel_get_time(void)
{
        u64 ns = ktime_get_ns();

        return ns >> CODEL_SHIFT;
}

/* Dealing with timer wrapping, according to RFC 1982, as described in Wikipedia:
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 * codel_time_after(a,b) returns true if the time a is after time b.
 */
#define codel_time_after(a, b)                                          \
        (typecheck(codel_time_t, a) &&                                  \
         typecheck(codel_time_t, b) &&                                  \
         ((s32)((a) - (b)) > 0))
#define codel_time_before(a, b)         codel_time_after(b, a)

#define codel_time_after_eq(a, b)                                       \
        (typecheck(codel_time_t, a) &&                                  \
         typecheck(codel_time_t, b) &&                                  \
         ((s32)((a) - (b)) >= 0))
#define codel_time_before_eq(a, b)      codel_time_after_eq(b, a)

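/* Example of wrap-safe comparison: with a = 10 and b = 0xfffffff0 (just
 * before the u32 wrap), (s32)((a) - (b)) == 26 > 0, so codel_time_after(a, b)
 * is true even though a is numerically smaller than b.
 */
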
/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
struct codel_skb_cb {
        codel_time_t enqueue_time;
};

static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
        return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
}

static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
{
        return get_codel_cb(skb)->enqueue_time;
}

static void codel_set_enqueue_time(struct sk_buff *skb)
{
        get_codel_cb(skb)->enqueue_time = codel_get_time();
}

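/* A minimal sketch (illustration only, not compiled) of how a qdisc enqueue
 * path is expected to use these helpers, loosely following
 * net/sched/sch_codel.c; the example_ name is hypothetical.
 */
#if 0
static int example_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        if (likely(qdisc_qlen(sch) < sch->limit)) {
                /* timestamp the packet so codel_should_drop() can later
                 * compute its sojourn time at dequeue
                 */
                codel_set_enqueue_time(skb);
                return qdisc_enqueue_tail(skb, sch);
        }
        return qdisc_drop(skb, sch);
}
#endif
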
static inline u32 codel_time_to_us(codel_time_t val)
{
        u64 valns = ((u64)val << CODEL_SHIFT);

        do_div(valns, NSEC_PER_USEC);
        return (u32)valns;
}

/**
 * struct codel_params - contains codel parameters
 * @target:     target queue size (in time units)
 * @ce_threshold:  threshold for marking packets with ECN CE
 * @interval:   width of moving time window
 * @mtu:        device mtu, or minimal queue backlog in bytes.
 * @ecn:        is Explicit Congestion Notification enabled
 */
struct codel_params {
        codel_time_t    target;
        codel_time_t    ce_threshold;
        codel_time_t    interval;
        u32             mtu;
        bool            ecn;
};

/**
 * struct codel_vars - contains codel variables
 * @count:              how many drops we've done since the last time we
 *                      entered dropping state
 * @lastcount:          count at entry to dropping state
 * @dropping:           set to true if in dropping state
 * @rec_inv_sqrt:       reciprocal value of sqrt(count) >> 1
 * @first_above_time:   when we went (or will go) continuously above target
 *                      for interval
 * @drop_next:          time to drop next packet, or when we dropped last
 * @ldelay:             sojourn time of last dequeued packet
 */
struct codel_vars {
        u32             count;
        u32             lastcount;
        bool            dropping;
        u16             rec_inv_sqrt;
        codel_time_t    first_above_time;
        codel_time_t    drop_next;
        codel_time_t    ldelay;
};

#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
/* needed shift to get a Q0.32 number from rec_inv_sqrt */
#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)

/**
 * struct codel_stats - contains codel shared variables and stats
 * @maxpacket:  largest packet we've seen so far
 * @drop_count: temp count of dropped packets in dequeue()
 * @drop_len:   bytes of dropped packets in dequeue()
 * @ecn_mark:   number of packets we ECN marked instead of dropping
 * @ce_mark:    number of packets CE marked because sojourn time was above ce_threshold
 */
struct codel_stats {
        u32             maxpacket;
        u32             drop_count;
        u32             drop_len;
        u32             ecn_mark;
        u32             ce_mark;
};

#define CODEL_DISABLED_THRESHOLD INT_MAX

static void codel_params_init(struct codel_params *params,
                              const struct Qdisc *sch)
{
        params->interval = MS2TIME(100);
        params->target = MS2TIME(5);
        params->mtu = psched_mtu(qdisc_dev(sch));
        params->ce_threshold = CODEL_DISABLED_THRESHOLD;
        params->ecn = false;
}

static void codel_vars_init(struct codel_vars *vars)
{
        memset(vars, 0, sizeof(*vars));
}

static void codel_stats_init(struct codel_stats *stats)
{
        stats->maxpacket = 0;
}

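/* A minimal sketch (illustration only, not compiled) of the typical setup:
 * a qdisc keeps one instance of each structure in its private data and
 * initializes them once, roughly as net/sched/sch_codel.c does. The
 * example_ names are hypothetical.
 */
#if 0
struct example_codel_sched_data {
        struct codel_params     params;
        struct codel_vars       vars;
        struct codel_stats      stats;
};

static int example_codel_init(struct Qdisc *sch)
{
        struct example_codel_sched_data *q = qdisc_priv(sch);

        codel_params_init(&q->params, sch);     /* 5 ms target, 100 ms interval */
        codel_vars_init(&q->vars);
        codel_stats_init(&q->stats);
        return 0;
}
#endif
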
/*
 * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
 *
 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
 */
static void codel_Newton_step(struct codel_vars *vars)
{
        u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
        u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
        u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);

        val >>= 2; /* avoid overflow in following multiply */
        val = (val * invsqrt) >> (32 - 2 + 1);

        vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}

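/* Worked example: for count == 4 the true value is 1/sqrt(4) == 0.5, i.e.
 * 0x80000000 in Q0.32, stored as the top REC_INV_SQRT_BITS == 16 bits
 * (0x8000) of rec_inv_sqrt. That value is a fixed point of the iteration,
 * since (0.5 / 2) * (3 - 4 * 0.25) == 0.5.
 */
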
/*
 * CoDel control_law is t + interval/sqrt(count)
 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
 * both sqrt() and divide operation.
 */
static codel_time_t codel_control_law(codel_time_t t,
                                      codel_time_t interval,
                                      u32 rec_inv_sqrt)
{
        return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}

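/* Worked example: with count == 4 and rec_inv_sqrt ~= 0x8000 (Q0.16),
 * reciprocal_scale() multiplies interval by 0x80000000 / 2^32 == 0.5,
 * so the next drop is scheduled interval / sqrt(4) after t, as intended.
 */
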
static bool codel_should_drop(const struct sk_buff *skb,
                              struct Qdisc *sch,
                              struct codel_vars *vars,
                              struct codel_params *params,
                              struct codel_stats *stats,
                              codel_time_t now)
{
        bool ok_to_drop;

        if (!skb) {
                vars->first_above_time = 0;
                return false;
        }

        vars->ldelay = now - codel_get_enqueue_time(skb);
        sch->qstats.backlog -= qdisc_pkt_len(skb);

        if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
                stats->maxpacket = qdisc_pkt_len(skb);

        if (codel_time_before(vars->ldelay, params->target) ||
            sch->qstats.backlog <= params->mtu) {
                /* went below - stay below for at least interval */
                vars->first_above_time = 0;
                return false;
        }
        ok_to_drop = false;
        if (vars->first_above_time == 0) {
                /* just went above from below. If we stay above
                 * for at least interval we'll say it's ok to drop
                 */
                vars->first_above_time = now + params->interval;
        } else if (codel_time_after(now, vars->first_above_time)) {
                ok_to_drop = true;
        }
        return ok_to_drop;
}

typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
                                                struct Qdisc *sch);

static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                                     struct codel_params *params,
                                     struct codel_vars *vars,
                                     struct codel_stats *stats,
                                     codel_skb_dequeue_t dequeue_func)
{
        struct sk_buff *skb = dequeue_func(vars, sch);
        codel_time_t now;
        bool drop;

        if (!skb) {
                vars->dropping = false;
                return skb;
        }
        now = codel_get_time();
        drop = codel_should_drop(skb, sch, vars, params, stats, now);
        if (vars->dropping) {
                if (!drop) {
                        /* sojourn time below target - leave dropping state */
                        vars->dropping = false;
                } else if (codel_time_after_eq(now, vars->drop_next)) {
                        /* It's time for the next drop. Drop the current
                         * packet and dequeue the next. The dequeue might
                         * take us out of dropping state.
                         * If not, schedule the next drop.
                         * A large backlog might result in drop rates so high
                         * that the next drop should happen now,
                         * hence the while loop.
                         */
                        while (vars->dropping &&
                               codel_time_after_eq(now, vars->drop_next)) {
                                vars->count++; /* don't care about a possible
                                                * wrap, since there is no
                                                * divide anymore
                                                */
                                codel_Newton_step(vars);
                                if (params->ecn && INET_ECN_set_ce(skb)) {
                                        stats->ecn_mark++;
                                        vars->drop_next =
                                                codel_control_law(vars->drop_next,
                                                                  params->interval,
                                                                  vars->rec_inv_sqrt);
                                        goto end;
                                }
                                stats->drop_len += qdisc_pkt_len(skb);
                                qdisc_drop(skb, sch);
                                stats->drop_count++;
                                skb = dequeue_func(vars, sch);
                                if (!codel_should_drop(skb, sch,
                                                       vars, params, stats, now)) {
                                        /* leave dropping state */
                                        vars->dropping = false;
                                } else {
                                        /* and schedule the next drop */
                                        vars->drop_next =
                                                codel_control_law(vars->drop_next,
                                                                  params->interval,
                                                                  vars->rec_inv_sqrt);
                                }
                        }
                }
        } else if (drop) {
                u32 delta;

                if (params->ecn && INET_ECN_set_ce(skb)) {
                        stats->ecn_mark++;
                } else {
                        stats->drop_len += qdisc_pkt_len(skb);
                        qdisc_drop(skb, sch);
                        stats->drop_count++;

                        skb = dequeue_func(vars, sch);
                        drop = codel_should_drop(skb, sch, vars, params,
                                                 stats, now);
                }
                vars->dropping = true;
                /* if min went above target close to when we last went below it
                 * assume that the drop rate that controlled the queue on the
                 * last cycle is a good starting point to control it now.
                 */
                delta = vars->count - vars->lastcount;
                if (delta > 1 &&
                    codel_time_before(now - vars->drop_next,
                                      16 * params->interval)) {
                        vars->count = delta;
                        /* we don't care if the rec_inv_sqrt approximation
                         * is not very precise:
                         * the next Newton steps will correct it quadratically.
                         */
                        codel_Newton_step(vars);
                } else {
                        vars->count = 1;
                        vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
                }
                vars->lastcount = vars->count;
                vars->drop_next = codel_control_law(now, params->interval,
                                                    vars->rec_inv_sqrt);
        }
end:
        if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
            INET_ECN_set_ce(skb))
                stats->ce_mark++;
        return skb;
}
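
/* A minimal usage sketch (illustration only, not compiled), loosely
 * following net/sched/sch_codel.c: the owning qdisc supplies a dequeue
 * callback that pulls from its own queue, then reports the drops that
 * codel_dequeue() performed. example_codel_sched_data is the hypothetical
 * private struct from the earlier sketch.
 */
#if 0
static struct sk_buff *example_dequeue_head(struct codel_vars *vars,
                                            struct Qdisc *sch)
{
        return __skb_dequeue(&sch->q);
}

static struct sk_buff *example_codel_dequeue(struct Qdisc *sch)
{
        struct example_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
                            example_dequeue_head);

        /* Drops done inside codel_dequeue() are accounted to the parent
         * here; sch_codel.c defers this when the queue became empty.
         */
        if (q->stats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
                                          q->stats.drop_len);
                q->stats.drop_count = 0;
                q->stats.drop_len = 0;
        }
        if (skb)
                qdisc_bstats_update(sch, skb);
        return skb;
}
#endif
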
#endif /* __NET_SCHED_CODEL_H */