/*
 * net/sched/cls_flow.c         Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */
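
/*
 * Illustrative usage (assuming the usual iproute2 "flow" filter
 * syntax):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		flow hash keys src,dst,proto,proto-src,proto-dst \
 *		divisor 1024 perturb 60
 *
 * hashes each packet's flow tuple into one of 1024 minor classes
 * under the parent qdisc, reseeding the hash every 60 seconds.
 */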

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

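/*
 * One instance per filter. nkeys/keymask select which FLOW_KEY_*
 * values feed classification; mask/xor/rshift/addend transform the
 * single key in map mode, while hashrnd seeds jhash2() in hash mode
 * and is periodically rerandomized by perturb_timer.
 */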
struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_head		rcu;
};

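/* Fold a kernel pointer into 32 bits for use as a hash input. */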
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

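/*
 * The flow_get_*() helpers below read the dissected keys and fall
 * back to folded socket/dst pointers when the dissector found
 * nothing, so unclassifiable packets still spread across classes.
 */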
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

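/*
 * CTTUPLE() is a statement expression that yields the requested
 * conntrack tuple member, or jumps to the caller's "fallback" label
 * when no conntrack entry is attached (or conntrack is compiled out).
 */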
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

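/*
 * skb_to_full_sk() maps request minisocks to their full listener
 * socket, since only full sockets carry sk_socket and credentials.
 */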
static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

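/*
 * Dispatch a FLOW_KEY_* id to its getter. Unknown ids are rejected
 * at configuration time, so hitting the default case is a bug.
 */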
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

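/*
 * Keys whose getters may consult the dissected flow_keys;
 * flow_classify() runs the flow dissector only when the filter's
 * keymask intersects this set.
 */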
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |			\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

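/*
 * For each filter: match its ematches, gather the configured keys,
 * then either jhash2() them (hash mode) or transform the single key
 * with mask/xor/rshift/addend (map mode). The result, reduced modulo
 * divisor if set, becomes the minor part of the returned classid.
 */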
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

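/*
 * Deferrable timer callback: reseed the hash so flows migrate to new
 * classes over time, then rearm while a perturb period is configured.
 */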
static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

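/*
 * RCU callback: runs after a grace period, when no classifier can
 * still see the filter, so the timer and extensions can be torn down.
 */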
static void flow_destroy_filter(struct rcu_head *head)
{
	struct flow_filter *f = container_of(head, struct flow_filter, rcu);

	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	kfree(f);
}

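/*
 * Create or replace a filter. Runs under RTNL; a replacement is
 * fully constructed before being swapped in with list_replace_rcu(),
 * so concurrent classifiers always see a consistent filter.
 */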
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		/* keymask holds bit positions, so test the key bits */
		if ((keymask & ((1 << FLOW_KEY_SKUID) |
				(1 << FLOW_KEY_SKGID))) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	err = -ENOBUFS;
	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		goto err2;

	tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);

	fold = (struct flow_filter *)*arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask  = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	fnew->perturb_timer.function = flow_perturbation;
	fnew->perturb_timer.data = (unsigned long)fnew;
	init_timer_deferrable(&fnew->perturb_timer);

	tcf_exts_change(tp, &fnew->exts, &e);
	tcf_em_tree_change(tp, &fnew->ematches, &t);

	netif_keep_dst(qdisc_dev(tp->q));

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys   = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = (unsigned long)fnew;

	if (fold)
		call_rcu(&fold->rcu, flow_destroy_filter);
	return 0;

err2:
	tcf_em_tree_destroy(&t);
	kfree(fnew);
err1:
	tcf_exts_destroy(&e);
	return err;
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	list_del_rcu(&f->list);
	call_rcu(&f->rcu, flow_destroy_filter);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

static bool flow_destroy(struct tcf_proto *tp, bool force)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, flow_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");