These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/net/ipv6/route.c
1 /*
2  *      Linux INET6 implementation
3  *      FIB front-end.
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13
14 /*      Changes:
15  *
16  *      YOSHIFUJI Hideaki @USAGI
17  *              reworked default router selection.
18  *              - respect outgoing interface
19  *              - select from (probably) reachable routers (i.e.
20  *              routers in REACHABLE, STALE, DELAY or PROBE states).
21  *              - always select the same router if it is (probably)
22  *              reachable.  otherwise, round-robin the list.
23  *      Ville Nuorvala
24  *              Fixed routing subtrees.
25  */
26
27 #define pr_fmt(fmt) "IPv6: " fmt
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/dst_metadata.h>
58 #include <net/xfrm.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/nexthop.h>
62 #include <net/lwtunnel.h>
63 #include <net/ip_tunnels.h>
64 #include <net/l3mdev.h>
65
66 #include <asm/uaccess.h>
67
68 #ifdef CONFIG_SYSCTL
69 #include <linux/sysctl.h>
70 #endif
71
72 enum rt6_nud_state {
73         RT6_NUD_FAIL_HARD = -3,
74         RT6_NUD_FAIL_PROBE = -2,
75         RT6_NUD_FAIL_DO_RR = -1,
76         RT6_NUD_SUCCEED = 1
77 };
78
79 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
80 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
81 static unsigned int      ip6_default_advmss(const struct dst_entry *dst);
82 static unsigned int      ip6_mtu(const struct dst_entry *dst);
83 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
84 static void             ip6_dst_destroy(struct dst_entry *);
85 static void             ip6_dst_ifdown(struct dst_entry *,
86                                        struct net_device *dev, int how);
87 static int               ip6_dst_gc(struct dst_ops *ops);
88
89 static int              ip6_pkt_discard(struct sk_buff *skb);
90 static int              ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
91 static int              ip6_pkt_prohibit(struct sk_buff *skb);
92 static int              ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
93 static void             ip6_link_failure(struct sk_buff *skb);
94 static void             ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
95                                            struct sk_buff *skb, u32 mtu);
96 static void             rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
97                                         struct sk_buff *skb);
98 static void             rt6_dst_from_metrics_check(struct rt6_info *rt);
99 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
100
101 #ifdef CONFIG_IPV6_ROUTE_INFO
102 static struct rt6_info *rt6_add_route_info(struct net *net,
103                                            const struct in6_addr *prefix, int prefixlen,
104                                            const struct in6_addr *gwaddr, int ifindex,
105                                            unsigned int pref);
106 static struct rt6_info *rt6_get_route_info(struct net *net,
107                                            const struct in6_addr *prefix, int prefixlen,
108                                            const struct in6_addr *gwaddr, int ifindex);
109 #endif
110
111 struct uncached_list {
112         spinlock_t              lock;
113         struct list_head        head;
114 };
115
116 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
117
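/* Each CPU keeps its own list of uncached (DST_NOCACHE) routes so that
 * rt6_uncached_list_flush_dev() can re-point their device and inet6_dev
 * references at the loopback device when the original device goes away.
 */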
118 static void rt6_uncached_list_add(struct rt6_info *rt)
119 {
120         struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
121
122         rt->dst.flags |= DST_NOCACHE;
123         rt->rt6i_uncached_list = ul;
124
125         spin_lock_bh(&ul->lock);
126         list_add_tail(&rt->rt6i_uncached, &ul->head);
127         spin_unlock_bh(&ul->lock);
128 }
129
130 static void rt6_uncached_list_del(struct rt6_info *rt)
131 {
132         if (!list_empty(&rt->rt6i_uncached)) {
133                 struct uncached_list *ul = rt->rt6i_uncached_list;
134
135                 spin_lock_bh(&ul->lock);
136                 list_del(&rt->rt6i_uncached);
137                 spin_unlock_bh(&ul->lock);
138         }
139 }
140
141 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
142 {
143         struct net_device *loopback_dev = net->loopback_dev;
144         int cpu;
145
146         if (dev == loopback_dev)
147                 return;
148
149         for_each_possible_cpu(cpu) {
150                 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
151                 struct rt6_info *rt;
152
153                 spin_lock_bh(&ul->lock);
154                 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
155                         struct inet6_dev *rt_idev = rt->rt6i_idev;
156                         struct net_device *rt_dev = rt->dst.dev;
157
158                         if (rt_idev->dev == dev) {
159                                 rt->rt6i_idev = in6_dev_get(loopback_dev);
160                                 in6_dev_put(rt_idev);
161                         }
162
163                         if (rt_dev == dev) {
164                                 rt->dst.dev = loopback_dev;
165                                 dev_hold(rt->dst.dev);
166                                 dev_put(rt_dev);
167                         }
168                 }
169                 spin_unlock_bh(&ul->lock);
170         }
171 }
172
173 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
174 {
175         return dst_metrics_write_ptr(rt->dst.from);
176 }
177
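/* Copy-on-write metrics: RTF_PCPU clones write through to the metrics of
 * their parent (dst.from), RTF_CACHE clones keep read-only metrics, and
 * all other routes fall back to the generic dst COW helper.
 */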
178 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
179 {
180         struct rt6_info *rt = (struct rt6_info *)dst;
181
182         if (rt->rt6i_flags & RTF_PCPU)
183                 return rt6_pcpu_cow_metrics(rt);
184         else if (rt->rt6i_flags & RTF_CACHE)
185                 return NULL;
186         else
187                 return dst_cow_metrics_generic(dst, old);
188 }
189
190 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
191                                              struct sk_buff *skb,
192                                              const void *daddr)
193 {
194         struct in6_addr *p = &rt->rt6i_gateway;
195
196         if (!ipv6_addr_any(p))
197                 return (const void *) p;
198         else if (skb)
199                 return &ipv6_hdr(skb)->daddr;
200         return daddr;
201 }
202
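/* Resolve the neighbour for this dst: prefer the route's gateway address,
 * fall back to the packet's destination, and finally to the caller-supplied
 * daddr.  A neighbour entry is created on demand if the lookup misses.
 */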
203 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
204                                           struct sk_buff *skb,
205                                           const void *daddr)
206 {
207         struct rt6_info *rt = (struct rt6_info *) dst;
208         struct neighbour *n;
209
210         daddr = choose_neigh_daddr(rt, skb, daddr);
211         n = __ipv6_neigh_lookup(dst->dev, daddr);
212         if (n)
213                 return n;
214         return neigh_create(&nd_tbl, daddr, dst->dev);
215 }
216
217 static struct dst_ops ip6_dst_ops_template = {
218         .family                 =       AF_INET6,
219         .gc                     =       ip6_dst_gc,
220         .gc_thresh              =       1024,
221         .check                  =       ip6_dst_check,
222         .default_advmss         =       ip6_default_advmss,
223         .mtu                    =       ip6_mtu,
224         .cow_metrics            =       ipv6_cow_metrics,
225         .destroy                =       ip6_dst_destroy,
226         .ifdown                 =       ip6_dst_ifdown,
227         .negative_advice        =       ip6_negative_advice,
228         .link_failure           =       ip6_link_failure,
229         .update_pmtu            =       ip6_rt_update_pmtu,
230         .redirect               =       rt6_do_redirect,
231         .local_out              =       __ip6_local_out,
232         .neigh_lookup           =       ip6_neigh_lookup,
233 };
234
235 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
236 {
237         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
238
239         return mtu ? : dst->dev->mtu;
240 }
241
242 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
243                                          struct sk_buff *skb, u32 mtu)
244 {
245 }
246
247 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
248                                       struct sk_buff *skb)
249 {
250 }
251
252 static struct dst_ops ip6_dst_blackhole_ops = {
253         .family                 =       AF_INET6,
254         .destroy                =       ip6_dst_destroy,
255         .check                  =       ip6_dst_check,
256         .mtu                    =       ip6_blackhole_mtu,
257         .default_advmss         =       ip6_default_advmss,
258         .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
259         .redirect               =       ip6_rt_blackhole_redirect,
260         .cow_metrics            =       dst_cow_metrics_generic,
261         .neigh_lookup           =       ip6_neigh_lookup,
262 };
263
264 static const u32 ip6_template_metrics[RTAX_MAX] = {
265         [RTAX_HOPLIMIT - 1] = 0,
266 };
267
268 static const struct rt6_info ip6_null_entry_template = {
269         .dst = {
270                 .__refcnt       = ATOMIC_INIT(1),
271                 .__use          = 1,
272                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
273                 .error          = -ENETUNREACH,
274                 .input          = ip6_pkt_discard,
275                 .output         = ip6_pkt_discard_out,
276         },
277         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
278         .rt6i_protocol  = RTPROT_KERNEL,
279         .rt6i_metric    = ~(u32) 0,
280         .rt6i_ref       = ATOMIC_INIT(1),
281 };
282
283 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
284
285 static const struct rt6_info ip6_prohibit_entry_template = {
286         .dst = {
287                 .__refcnt       = ATOMIC_INIT(1),
288                 .__use          = 1,
289                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
290                 .error          = -EACCES,
291                 .input          = ip6_pkt_prohibit,
292                 .output         = ip6_pkt_prohibit_out,
293         },
294         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
295         .rt6i_protocol  = RTPROT_KERNEL,
296         .rt6i_metric    = ~(u32) 0,
297         .rt6i_ref       = ATOMIC_INIT(1),
298 };
299
300 static const struct rt6_info ip6_blk_hole_entry_template = {
301         .dst = {
302                 .__refcnt       = ATOMIC_INIT(1),
303                 .__use          = 1,
304                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
305                 .error          = -EINVAL,
306                 .input          = dst_discard,
307                 .output         = dst_discard_out,
308         },
309         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
310         .rt6i_protocol  = RTPROT_KERNEL,
311         .rt6i_metric    = ~(u32) 0,
312         .rt6i_ref       = ATOMIC_INIT(1),
313 };
314
315 #endif
316
317 static void rt6_info_init(struct rt6_info *rt)
318 {
319         struct dst_entry *dst = &rt->dst;
320
321         memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
322         INIT_LIST_HEAD(&rt->rt6i_siblings);
323         INIT_LIST_HEAD(&rt->rt6i_uncached);
324 }
325
326 /* allocate dst with ip6_dst_ops */
327 static struct rt6_info *__ip6_dst_alloc(struct net *net,
328                                         struct net_device *dev,
329                                         int flags)
330 {
331         struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
332                                         0, DST_OBSOLETE_FORCE_CHK, flags);
333
334         if (rt)
335                 rt6_info_init(rt);
336
337         return rt;
338 }
339
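/* Like __ip6_dst_alloc(), but also allocates the per-cpu rt6_info pointer
 * array used for RTF_PCPU clones.  The allocation is atomic; on failure
 * the freshly allocated dst is destroyed and NULL is returned.
 */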
340 static struct rt6_info *ip6_dst_alloc(struct net *net,
341                                       struct net_device *dev,
342                                       int flags)
343 {
344         struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
345
346         if (rt) {
347                 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
348                 if (rt->rt6i_pcpu) {
349                         int cpu;
350
351                         for_each_possible_cpu(cpu) {
352                                 struct rt6_info **p;
353
354                                 p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
355                                 /* no one shares rt */
356                                 *p =  NULL;
357                         }
358                 } else {
359                         dst_destroy((struct dst_entry *)rt);
360                         return NULL;
361                 }
362         }
363
364         return rt;
365 }
366
367 static void ip6_dst_destroy(struct dst_entry *dst)
368 {
369         struct rt6_info *rt = (struct rt6_info *)dst;
370         struct dst_entry *from = dst->from;
371         struct inet6_dev *idev;
372
373         dst_destroy_metrics_generic(dst);
374         free_percpu(rt->rt6i_pcpu);
375         rt6_uncached_list_del(rt);
376
377         idev = rt->rt6i_idev;
378         if (idev) {
379                 rt->rt6i_idev = NULL;
380                 in6_dev_put(idev);
381         }
382
383         dst->from = NULL;
384         dst_release(from);
385 }
386
387 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
388                            int how)
389 {
390         struct rt6_info *rt = (struct rt6_info *)dst;
391         struct inet6_dev *idev = rt->rt6i_idev;
392         struct net_device *loopback_dev =
393                 dev_net(dev)->loopback_dev;
394
395         if (dev != loopback_dev) {
396                 if (idev && idev->dev == dev) {
397                         struct inet6_dev *loopback_idev =
398                                 in6_dev_get(loopback_dev);
399                         if (loopback_idev) {
400                                 rt->rt6i_idev = loopback_idev;
401                                 in6_dev_put(idev);
402                         }
403                 }
404         }
405 }
406
407 static bool __rt6_check_expired(const struct rt6_info *rt)
408 {
409         if (rt->rt6i_flags & RTF_EXPIRES)
410                 return time_after(jiffies, rt->dst.expires);
411         else
412                 return false;
413 }
414
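/* Unlike __rt6_check_expired(), this also follows dst.from so that a
 * cached clone is treated as expired once its parent route has expired.
 */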
415 static bool rt6_check_expired(const struct rt6_info *rt)
416 {
417         if (rt->rt6i_flags & RTF_EXPIRES) {
418                 if (time_after(jiffies, rt->dst.expires))
419                         return true;
420         } else if (rt->dst.from) {
421                 return rt6_check_expired((struct rt6_info *) rt->dst.from);
422         }
423         return false;
424 }
425
426 /* Multipath route selection:
427  *   Hash based function using packet header and flowlabel.
428  *   Hash-based function using the packet header and flow label.
429  */
430 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
431                                const struct flowi6 *fl6)
432 {
433         return get_hash_from_flowi6(fl6) % candidate_count;
434 }
435
436 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
437                                              struct flowi6 *fl6, int oif,
438                                              int strict)
439 {
440         struct rt6_info *sibling, *next_sibling;
441         int route_choosen;
442
443         route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
444         /* Don't change the route if route_choosen == 0
445          * (the siblings list does not include ourselves)
446          */
447         if (route_choosen)
448                 list_for_each_entry_safe(sibling, next_sibling,
449                                 &match->rt6i_siblings, rt6i_siblings) {
450                         route_choosen--;
451                         if (route_choosen == 0) {
452                                 if (rt6_score_route(sibling, oif, strict) < 0)
453                                         break;
454                                 match = sibling;
455                                 break;
456                         }
457                 }
458         return match;
459 }
460
461 /*
462  *      Route lookup. Any table->tb6_lock is implied.
463  */
464
465 static inline struct rt6_info *rt6_device_match(struct net *net,
466                                                     struct rt6_info *rt,
467                                                     const struct in6_addr *saddr,
468                                                     int oif,
469                                                     int flags)
470 {
471         struct rt6_info *local = NULL;
472         struct rt6_info *sprt;
473
474         if (!oif && ipv6_addr_any(saddr))
475                 goto out;
476
477         for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
478                 struct net_device *dev = sprt->dst.dev;
479
480                 if (oif) {
481                         if (dev->ifindex == oif)
482                                 return sprt;
483                         if (dev->flags & IFF_LOOPBACK) {
484                                 if (!sprt->rt6i_idev ||
485                                     sprt->rt6i_idev->dev->ifindex != oif) {
486                                         if (flags & RT6_LOOKUP_F_IFACE)
487                                                 continue;
488                                         if (local &&
489                                             local->rt6i_idev->dev->ifindex == oif)
490                                                 continue;
491                                 }
492                                 local = sprt;
493                         }
494                 } else {
495                         if (ipv6_chk_addr(net, saddr, dev,
496                                           flags & RT6_LOOKUP_F_IFACE))
497                                 return sprt;
498                 }
499         }
500
501         if (oif) {
502                 if (local)
503                         return local;
504
505                 if (flags & RT6_LOOKUP_F_IFACE)
506                         return net->ipv6.ip6_null_entry;
507         }
508 out:
509         return rt;
510 }
511
512 #ifdef CONFIG_IPV6_ROUTER_PREF
513 struct __rt6_probe_work {
514         struct work_struct work;
515         struct in6_addr target;
516         struct net_device *dev;
517 };
518
519 static void rt6_probe_deferred(struct work_struct *w)
520 {
521         struct in6_addr mcaddr;
522         struct __rt6_probe_work *work =
523                 container_of(w, struct __rt6_probe_work, work);
524
525         addrconf_addr_solict_mult(&work->target, &mcaddr);
526         ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL);
527         dev_put(work->dev);
528         kfree(work);
529 }
530
531 static void rt6_probe(struct rt6_info *rt)
532 {
533         struct __rt6_probe_work *work;
534         struct neighbour *neigh;
535         /*
536          * Okay, this does not seem to be appropriate
537          * for now, however, we need to check if it
538          * is really so; aka Router Reachability Probing.
539          *
540          * Router Reachability Probe MUST be rate-limited
541          * to no more than one per minute.
542          */
543         if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
544                 return;
545         rcu_read_lock_bh();
546         neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
547         if (neigh) {
548                 if (neigh->nud_state & NUD_VALID)
549                         goto out;
550
551                 work = NULL;
552                 write_lock(&neigh->lock);
553                 if (!(neigh->nud_state & NUD_VALID) &&
554                     time_after(jiffies,
555                                neigh->updated +
556                                rt->rt6i_idev->cnf.rtr_probe_interval)) {
557                         work = kmalloc(sizeof(*work), GFP_ATOMIC);
558                         if (work)
559                                 __neigh_set_probe_once(neigh);
560                 }
561                 write_unlock(&neigh->lock);
562         } else {
563                 work = kmalloc(sizeof(*work), GFP_ATOMIC);
564         }
565
566         if (work) {
567                 INIT_WORK(&work->work, rt6_probe_deferred);
568                 work->target = rt->rt6i_gateway;
569                 dev_hold(rt->dst.dev);
570                 work->dev = rt->dst.dev;
571                 schedule_work(&work->work);
572         }
573
574 out:
575         rcu_read_unlock_bh();
576 }
577 #else
578 static inline void rt6_probe(struct rt6_info *rt)
579 {
580 }
581 #endif
582
583 /*
584  * Default Router Selection (RFC 2461 6.3.6)
585  */
586 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
587 {
588         struct net_device *dev = rt->dst.dev;
589         if (!oif || dev->ifindex == oif)
590                 return 2;
591         if ((dev->flags & IFF_LOOPBACK) &&
592             rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
593                 return 1;
594         return 0;
595 }
596
597 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
598 {
599         struct neighbour *neigh;
600         enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
601
602         if (rt->rt6i_flags & RTF_NONEXTHOP ||
603             !(rt->rt6i_flags & RTF_GATEWAY))
604                 return RT6_NUD_SUCCEED;
605
606         rcu_read_lock_bh();
607         neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
608         if (neigh) {
609                 read_lock(&neigh->lock);
610                 if (neigh->nud_state & NUD_VALID)
611                         ret = RT6_NUD_SUCCEED;
612 #ifdef CONFIG_IPV6_ROUTER_PREF
613                 else if (!(neigh->nud_state & NUD_FAILED))
614                         ret = RT6_NUD_SUCCEED;
615                 else
616                         ret = RT6_NUD_FAIL_PROBE;
617 #endif
618                 read_unlock(&neigh->lock);
619         } else {
620                 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
621                       RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
622         }
623         rcu_read_unlock_bh();
624
625         return ret;
626 }
627
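/* Score a candidate route for default router selection: the interface
 * match supplies the low bits, the router preference (with
 * CONFIG_IPV6_ROUTER_PREF) the bits above them, and a negative RT6_NUD_*
 * value is returned when the neighbour state rules the route out.
 */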
628 static int rt6_score_route(struct rt6_info *rt, int oif,
629                            int strict)
630 {
631         int m;
632
633         m = rt6_check_dev(rt, oif);
634         if (!m && (strict & RT6_LOOKUP_F_IFACE))
635                 return RT6_NUD_FAIL_HARD;
636 #ifdef CONFIG_IPV6_ROUTER_PREF
637         m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
638 #endif
639         if (strict & RT6_LOOKUP_F_REACHABLE) {
640                 int n = rt6_check_neigh(rt);
641                 if (n < 0)
642                         return n;
643         }
644         return m;
645 }
646
647 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
648                                    int *mpri, struct rt6_info *match,
649                                    bool *do_rr)
650 {
651         int m;
652         bool match_do_rr = false;
653         struct inet6_dev *idev = rt->rt6i_idev;
654         struct net_device *dev = rt->dst.dev;
655
656         if (dev && !netif_carrier_ok(dev) &&
657             idev->cnf.ignore_routes_with_linkdown)
658                 goto out;
659
660         if (rt6_check_expired(rt))
661                 goto out;
662
663         m = rt6_score_route(rt, oif, strict);
664         if (m == RT6_NUD_FAIL_DO_RR) {
665                 match_do_rr = true;
666                 m = 0; /* lowest valid score */
667         } else if (m == RT6_NUD_FAIL_HARD) {
668                 goto out;
669         }
670
671         if (strict & RT6_LOOKUP_F_REACHABLE)
672                 rt6_probe(rt);
673
674         /* note that m can be RT6_NUD_FAIL_PROBE at this point */
675         if (m > *mpri) {
676                 *do_rr = match_do_rr;
677                 *mpri = m;
678                 match = rt;
679         }
680 out:
681         return match;
682 }
683
684 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
685                                      struct rt6_info *rr_head,
686                                      u32 metric, int oif, int strict,
687                                      bool *do_rr)
688 {
689         struct rt6_info *rt, *match, *cont;
690         int mpri = -1;
691
692         match = NULL;
693         cont = NULL;
694         for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
695                 if (rt->rt6i_metric != metric) {
696                         cont = rt;
697                         break;
698                 }
699
700                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
701         }
702
703         for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
704                 if (rt->rt6i_metric != metric) {
705                         cont = rt;
706                         break;
707                 }
708
709                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
710         }
711
712         if (match || !cont)
713                 return match;
714
715         for (rt = cont; rt; rt = rt->dst.rt6_next)
716                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
717
718         return match;
719 }
720
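/* Round-robin selection among the routes sharing fn->rr_ptr's metric.
 * When the scorer asks for round-robin (RT6_NUD_FAIL_DO_RR), rr_ptr is
 * advanced so the next lookup starts from the following entry; the null
 * entry is returned if nothing matches.
 */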
721 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
722 {
723         struct rt6_info *match, *rt0;
724         struct net *net;
725         bool do_rr = false;
726
727         rt0 = fn->rr_ptr;
728         if (!rt0)
729                 fn->rr_ptr = rt0 = fn->leaf;
730
731         match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
732                              &do_rr);
733
734         if (do_rr) {
735                 struct rt6_info *next = rt0->dst.rt6_next;
736
737                 /* no entries matched; do round-robin */
738                 if (!next || next->rt6i_metric != rt0->rt6i_metric)
739                         next = fn->leaf;
740
741                 if (next != rt0)
742                         fn->rr_ptr = next;
743         }
744
745         net = dev_net(rt0->dst.dev);
746         return match ? match : net->ipv6.ip6_null_entry;
747 }
748
749 static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
750 {
751         return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
752 }
753
754 #ifdef CONFIG_IPV6_ROUTE_INFO
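/* Process a Route Information option from a Router Advertisement
 * (RFC 4191): validate the prefix length against the option length, then
 * add, refresh or delete the corresponding RTF_ROUTEINFO route based on
 * the advertised preference and lifetime.
 */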
755 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
756                   const struct in6_addr *gwaddr)
757 {
758         struct net *net = dev_net(dev);
759         struct route_info *rinfo = (struct route_info *) opt;
760         struct in6_addr prefix_buf, *prefix;
761         unsigned int pref;
762         unsigned long lifetime;
763         struct rt6_info *rt;
764
765         if (len < sizeof(struct route_info)) {
766                 return -EINVAL;
767         }
768
769         /* Sanity check for prefix_len and length */
770         if (rinfo->length > 3) {
771                 return -EINVAL;
772         } else if (rinfo->prefix_len > 128) {
773                 return -EINVAL;
774         } else if (rinfo->prefix_len > 64) {
775                 if (rinfo->length < 2) {
776                         return -EINVAL;
777                 }
778         } else if (rinfo->prefix_len > 0) {
779                 if (rinfo->length < 1) {
780                         return -EINVAL;
781                 }
782         }
783
784         pref = rinfo->route_pref;
785         if (pref == ICMPV6_ROUTER_PREF_INVALID)
786                 return -EINVAL;
787
788         lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
789
790         if (rinfo->length == 3)
791                 prefix = (struct in6_addr *)rinfo->prefix;
792         else {
793                 /* this function is safe */
794                 ipv6_addr_prefix(&prefix_buf,
795                                  (struct in6_addr *)rinfo->prefix,
796                                  rinfo->prefix_len);
797                 prefix = &prefix_buf;
798         }
799
800         if (rinfo->prefix_len == 0)
801                 rt = rt6_get_dflt_router(gwaddr, dev);
802         else
803                 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
804                                         gwaddr, dev->ifindex);
805
806         if (rt && !lifetime) {
807                 ip6_del_rt(rt);
808                 rt = NULL;
809         }
810
811         if (!rt && lifetime)
812                 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
813                                         pref);
814         else if (rt)
815                 rt->rt6i_flags = RTF_ROUTEINFO |
816                                  (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
817
818         if (rt) {
819                 if (!addrconf_finite_timeout(lifetime))
820                         rt6_clean_expires(rt);
821                 else
822                         rt6_set_expires(rt, jiffies + HZ * lifetime);
823
824                 ip6_rt_put(rt);
825         }
826         return 0;
827 }
828 #endif
829
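/* Walk back up the fib6 tree after a failed match, descending into a
 * parent's source-address subtree when one exists, until a node carrying
 * route info (RTN_RTINFO) or the tree root is reached.
 */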
830 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
831                                         struct in6_addr *saddr)
832 {
833         struct fib6_node *pn;
834         while (1) {
835                 if (fn->fn_flags & RTN_TL_ROOT)
836                         return NULL;
837                 pn = fn->parent;
838                 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
839                         fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
840                 else
841                         fn = pn;
842                 if (fn->fn_flags & RTN_RTINFO)
843                         return fn;
844         }
845 }
846
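/* Simple (non-caching) policy lookup: match on device and source address,
 * optionally pick a multipath sibling, and backtrack up the tree when
 * only the null entry matches.  Used via ip6_route_lookup()/rt6_lookup().
 */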
847 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
848                                              struct fib6_table *table,
849                                              struct flowi6 *fl6, int flags)
850 {
851         struct fib6_node *fn;
852         struct rt6_info *rt;
853
854         read_lock_bh(&table->tb6_lock);
855         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
856 restart:
857         rt = fn->leaf;
858         rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
859         if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
860                 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
861         if (rt == net->ipv6.ip6_null_entry) {
862                 fn = fib6_backtrack(fn, &fl6->saddr);
863                 if (fn)
864                         goto restart;
865         }
866         dst_use(&rt->dst, jiffies);
867         read_unlock_bh(&table->tb6_lock);
868         return rt;
869
870 }
871
872 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
873                                     int flags)
874 {
875         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
876 }
877 EXPORT_SYMBOL_GPL(ip6_route_lookup);
878
879 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
880                             const struct in6_addr *saddr, int oif, int strict)
881 {
882         struct flowi6 fl6 = {
883                 .flowi6_oif = oif,
884                 .daddr = *daddr,
885         };
886         struct dst_entry *dst;
887         int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
888
889         if (saddr) {
890                 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
891                 flags |= RT6_LOOKUP_F_HAS_SADDR;
892         }
893
894         dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
895         if (dst->error == 0)
896                 return (struct rt6_info *) dst;
897
898         dst_release(dst);
899
900         return NULL;
901 }
902 EXPORT_SYMBOL(rt6_lookup);
903
904 /* ip6_ins_rt is called with FREE table->tb6_lock.
905    It takes the new route entry; if the addition fails for any reason,
906    the route is freed. In any case, if the caller does not hold a
907    reference, it may be destroyed.
908  */
909
910 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
911                         struct mx6_config *mxc)
912 {
913         int err;
914         struct fib6_table *table;
915
916         table = rt->rt6i_table;
917         write_lock_bh(&table->tb6_lock);
918         err = fib6_add(&table->tb6_root, rt, info, mxc);
919         write_unlock_bh(&table->tb6_lock);
920
921         return err;
922 }
923
924 int ip6_ins_rt(struct rt6_info *rt)
925 {
926         struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
927         struct mx6_config mxc = { .mx = NULL, };
928
929         return __ip6_ins_rt(rt, &info, &mxc);
930 }
931
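/* Create an RTF_CACHE clone of @ort for a specific destination (and, with
 * subtrees, source).  The clone is a /128 host route and is not linked
 * into the fib6 tree by this function.
 */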
932 static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
933                                            const struct in6_addr *daddr,
934                                            const struct in6_addr *saddr)
935 {
936         struct rt6_info *rt;
937
938         /*
939          *      Clone the route.
940          */
941
942         if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
943                 ort = (struct rt6_info *)ort->dst.from;
944
945         rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
946
947         if (!rt)
948                 return NULL;
949
950         ip6_rt_copy_init(rt, ort);
951         rt->rt6i_flags |= RTF_CACHE;
952         rt->rt6i_metric = 0;
953         rt->dst.flags |= DST_HOST;
954         rt->rt6i_dst.addr = *daddr;
955         rt->rt6i_dst.plen = 128;
956
957         if (!rt6_is_gw_or_nonexthop(ort)) {
958                 if (ort->rt6i_dst.plen != 128 &&
959                     ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
960                         rt->rt6i_flags |= RTF_ANYCAST;
961 #ifdef CONFIG_IPV6_SUBTREES
962                 if (rt->rt6i_src.plen && saddr) {
963                         rt->rt6i_src.addr = *saddr;
964                         rt->rt6i_src.plen = 128;
965                 }
966 #endif
967         }
968
969         return rt;
970 }
971
972 static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
973 {
974         struct rt6_info *pcpu_rt;
975
976         pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
977                                   rt->dst.dev, rt->dst.flags);
978
979         if (!pcpu_rt)
980                 return NULL;
981         ip6_rt_copy_init(pcpu_rt, rt);
982         pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
983         pcpu_rt->rt6i_flags |= RTF_PCPU;
984         return pcpu_rt;
985 }
986
987 /* It should be called with read_lock_bh(&tb6_lock) acquired */
988 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
989 {
990         struct rt6_info *pcpu_rt, **p;
991
992         p = this_cpu_ptr(rt->rt6i_pcpu);
993         pcpu_rt = *p;
994
995         if (pcpu_rt) {
996                 dst_hold(&pcpu_rt->dst);
997                 rt6_dst_from_metrics_check(pcpu_rt);
998         }
999         return pcpu_rt;
1000 }
1001
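/* Allocate and install a per-cpu clone of @rt.  cmpxchg() guards against
 * a concurrent writer on the same CPU; if @rt has already been unlinked
 * from the fib6 tree, the clone is dropped and @rt itself is returned so
 * the next dst_check() triggers a re-lookup.
 */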
1002 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1003 {
1004         struct fib6_table *table = rt->rt6i_table;
1005         struct rt6_info *pcpu_rt, *prev, **p;
1006
1007         pcpu_rt = ip6_rt_pcpu_alloc(rt);
1008         if (!pcpu_rt) {
1009                 struct net *net = dev_net(rt->dst.dev);
1010
1011                 dst_hold(&net->ipv6.ip6_null_entry->dst);
1012                 return net->ipv6.ip6_null_entry;
1013         }
1014
1015         read_lock_bh(&table->tb6_lock);
1016         if (rt->rt6i_pcpu) {
1017                 p = this_cpu_ptr(rt->rt6i_pcpu);
1018                 prev = cmpxchg(p, NULL, pcpu_rt);
1019                 if (prev) {
1020                         /* If someone did it before us, return prev instead */
1021                         dst_destroy(&pcpu_rt->dst);
1022                         pcpu_rt = prev;
1023                 }
1024         } else {
1025                 /* rt has been removed from the fib6 tree
1026                  * before we have a chance to acquire the read_lock.
1027                  * In this case, don't bother to create a pcpu rt
1028                  * since rt is going away anyway.  The next
1029                  * dst_check() will trigger a re-lookup.
1030                  */
1031                 dst_destroy(&pcpu_rt->dst);
1032                 pcpu_rt = rt;
1033         }
1034         dst_hold(&pcpu_rt->dst);
1035         rt6_dst_from_metrics_check(pcpu_rt);
1036         read_unlock_bh(&table->tb6_lock);
1037         return pcpu_rt;
1038 }
1039
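/* Core policy-routing lookup.  Depending on the matched route this returns
 * the fib6 entry itself (null entry or RTF_CACHE), an uncached RTF_CACHE
 * clone for the FLOWI_FLAG_KNOWN_NH case, or a per-cpu clone otherwise.
 */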
1040 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
1041                                       struct flowi6 *fl6, int flags)
1042 {
1043         struct fib6_node *fn, *saved_fn;
1044         struct rt6_info *rt;
1045         int strict = 0;
1046
1047         strict |= flags & RT6_LOOKUP_F_IFACE;
1048         if (net->ipv6.devconf_all->forwarding == 0)
1049                 strict |= RT6_LOOKUP_F_REACHABLE;
1050
1051         read_lock_bh(&table->tb6_lock);
1052
1053         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1054         saved_fn = fn;
1055
1056         if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1057                 oif = 0;
1058
1059 redo_rt6_select:
1060         rt = rt6_select(fn, oif, strict);
1061         if (rt->rt6i_nsiblings)
1062                 rt = rt6_multipath_select(rt, fl6, oif, strict);
1063         if (rt == net->ipv6.ip6_null_entry) {
1064                 fn = fib6_backtrack(fn, &fl6->saddr);
1065                 if (fn)
1066                         goto redo_rt6_select;
1067                 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1068                         /* also consider unreachable route */
1069                         strict &= ~RT6_LOOKUP_F_REACHABLE;
1070                         fn = saved_fn;
1071                         goto redo_rt6_select;
1072                 }
1073         }
1074
1075
1076         if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
1077                 dst_use(&rt->dst, jiffies);
1078                 read_unlock_bh(&table->tb6_lock);
1079
1080                 rt6_dst_from_metrics_check(rt);
1081                 return rt;
1082         } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1083                             !(rt->rt6i_flags & RTF_GATEWAY))) {
1084                 /* Create a RTF_CACHE clone which will not be
1085                  * owned by the fib6 tree.  It is for the special case where
1086                  * the daddr in the skb during the neighbor look-up is different
1087                  * from the fl6->daddr used to look-up route here.
1088                  */
1089
1090                 struct rt6_info *uncached_rt;
1091
1092                 dst_use(&rt->dst, jiffies);
1093                 read_unlock_bh(&table->tb6_lock);
1094
1095                 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
1096                 dst_release(&rt->dst);
1097
1098                 if (uncached_rt)
1099                         rt6_uncached_list_add(uncached_rt);
1100                 else
1101                         uncached_rt = net->ipv6.ip6_null_entry;
1102
1103                 dst_hold(&uncached_rt->dst);
1104                 return uncached_rt;
1105
1106         } else {
1107                 /* Get a percpu copy */
1108
1109                 struct rt6_info *pcpu_rt;
1110
1111                 rt->dst.lastuse = jiffies;
1112                 rt->dst.__use++;
1113                 pcpu_rt = rt6_get_pcpu_route(rt);
1114
1115                 if (pcpu_rt) {
1116                         read_unlock_bh(&table->tb6_lock);
1117                 } else {
1118                         /* We have to do the read_unlock first
1119                          * because rt6_make_pcpu_route() may trigger
1120                          * ip6_dst_gc() which will take the write_lock.
1121                          */
1122                         dst_hold(&rt->dst);
1123                         read_unlock_bh(&table->tb6_lock);
1124                         pcpu_rt = rt6_make_pcpu_route(rt);
1125                         dst_release(&rt->dst);
1126                 }
1127
1128                 return pcpu_rt;
1129
1130         }
1131 }
1132
1133 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1134                                             struct flowi6 *fl6, int flags)
1135 {
1136         return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1137 }
1138
1139 static struct dst_entry *ip6_route_input_lookup(struct net *net,
1140                                                 struct net_device *dev,
1141                                                 struct flowi6 *fl6, int flags)
1142 {
1143         if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1144                 flags |= RT6_LOOKUP_F_IFACE;
1145
1146         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1147 }
1148
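/* Attach a route to an incoming skb: build the flow key from the IPv6
 * header (and the tunnel key when receive-side tunnel metadata is
 * present), drop any dst the skb already carries, and set the new one.
 */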
1149 void ip6_route_input(struct sk_buff *skb)
1150 {
1151         const struct ipv6hdr *iph = ipv6_hdr(skb);
1152         struct net *net = dev_net(skb->dev);
1153         int flags = RT6_LOOKUP_F_HAS_SADDR;
1154         struct ip_tunnel_info *tun_info;
1155         struct flowi6 fl6 = {
1156                 .flowi6_iif = l3mdev_fib_oif(skb->dev),
1157                 .daddr = iph->daddr,
1158                 .saddr = iph->saddr,
1159                 .flowlabel = ip6_flowinfo(iph),
1160                 .flowi6_mark = skb->mark,
1161                 .flowi6_proto = iph->nexthdr,
1162         };
1163
1164         tun_info = skb_tunnel_info(skb);
1165         if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1166                 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
1167         skb_dst_drop(skb);
1168         skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1169 }
1170
1171 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1172                                              struct flowi6 *fl6, int flags)
1173 {
1174         return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1175 }
1176
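/* Output route lookup for locally generated packets.  An l3mdev master
 * (e.g. VRF) may supply the dst directly; otherwise lookup flags are
 * derived from the socket binding and source address before the policy
 * lookup runs.
 */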
1177 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
1178                                          struct flowi6 *fl6, int flags)
1179 {
1180         struct dst_entry *dst;
1181         bool any_src;
1182
1183         dst = l3mdev_rt6_dst_by_oif(net, fl6);
1184         if (dst)
1185                 return dst;
1186
1187         fl6->flowi6_iif = LOOPBACK_IFINDEX;
1188
1189         any_src = ipv6_addr_any(&fl6->saddr);
1190         if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1191             (fl6->flowi6_oif && any_src))
1192                 flags |= RT6_LOOKUP_F_IFACE;
1193
1194         if (!any_src)
1195                 flags |= RT6_LOOKUP_F_HAS_SADDR;
1196         else if (sk)
1197                 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1198
1199         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1200 }
1201 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
1202
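/* Convert @dst_orig into a blackhole dst: same addresses, metrics and
 * device, but input/output simply discard packets.  The reference on the
 * original dst is released here.
 */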
1203 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1204 {
1205         struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1206         struct dst_entry *new = NULL;
1207
1208         rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1209         if (rt) {
1210                 rt6_info_init(rt);
1211
1212                 new = &rt->dst;
1213                 new->__use = 1;
1214                 new->input = dst_discard;
1215                 new->output = dst_discard_out;
1216
1217                 dst_copy_metrics(new, &ort->dst);
1218                 rt->rt6i_idev = ort->rt6i_idev;
1219                 if (rt->rt6i_idev)
1220                         in6_dev_hold(rt->rt6i_idev);
1221
1222                 rt->rt6i_gateway = ort->rt6i_gateway;
1223                 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
1224                 rt->rt6i_metric = 0;
1225
1226                 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1227 #ifdef CONFIG_IPV6_SUBTREES
1228                 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1229 #endif
1230
1231                 dst_free(new);
1232         }
1233
1234         dst_release(dst_orig);
1235         return new ? new : ERR_PTR(-ENOMEM);
1236 }
1237
1238 /*
1239  *      Destination cache support functions
1240  */
1241
1242 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
1243 {
1244         if (rt->dst.from &&
1245             dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
1246                 dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
1247 }
1248
1249 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1250 {
1251         if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1252                 return NULL;
1253
1254         if (rt6_check_expired(rt))
1255                 return NULL;
1256
1257         return &rt->dst;
1258 }
1259
1260 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1261 {
1262         if (!__rt6_check_expired(rt) &&
1263             rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1264             rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1265                 return &rt->dst;
1266         else
1267                 return NULL;
1268 }
1269
1270 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1271 {
1272         struct rt6_info *rt;
1273
1274         rt = (struct rt6_info *) dst;
1275
1276         /* All IPv6 dsts are created with ->obsolete set to the value
1277          * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1278          * into this function always.
1279          */
1280
1281         rt6_dst_from_metrics_check(rt);
1282
1283         if (rt->rt6i_flags & RTF_PCPU ||
1284             (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
1285                 return rt6_dst_from_check(rt, cookie);
1286         else
1287                 return rt6_check(rt, cookie);
1288 }
1289
1290 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1291 {
1292         struct rt6_info *rt = (struct rt6_info *) dst;
1293
1294         if (rt) {
1295                 if (rt->rt6i_flags & RTF_CACHE) {
1296                         if (rt6_check_expired(rt)) {
1297                                 ip6_del_rt(rt);
1298                                 dst = NULL;
1299                         }
1300                 } else {
1301                         dst_release(dst);
1302                         dst = NULL;
1303                 }
1304         }
1305         return dst;
1306 }
1307
1308 static void ip6_link_failure(struct sk_buff *skb)
1309 {
1310         struct rt6_info *rt;
1311
1312         icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1313
1314         rt = (struct rt6_info *) skb_dst(skb);
1315         if (rt) {
1316                 if (rt->rt6i_flags & RTF_CACHE) {
1317                         dst_hold(&rt->dst);
1318                         ip6_del_rt(rt);
1319                 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1320                         rt->rt6i_node->fn_sernum = -1;
1321                 }
1322         }
1323 }
1324
1325 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1326 {
1327         struct net *net = dev_net(rt->dst.dev);
1328
1329         rt->rt6i_flags |= RTF_MODIFIED;
1330         rt->rt6i_pmtu = mtu;
1331         rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1332 }
1333
1334 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
1335 {
1336         return !(rt->rt6i_flags & RTF_CACHE) &&
1337                 (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
1338 }
1339
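/* Record a smaller path MTU.  Routes that may be shared (per-cpu clones
 * or fib tree entries) get a private RTF_CACHE clone so the clamped MTU
 * does not affect other destinations; RTF_CACHE routes are updated in
 * place.  The learned MTU expires after ip6_rt_mtu_expires.
 */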
1340 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1341                                  const struct ipv6hdr *iph, u32 mtu)
1342 {
1343         struct rt6_info *rt6 = (struct rt6_info *)dst;
1344
1345         if (rt6->rt6i_flags & RTF_LOCAL)
1346                 return;
1347
1348         dst_confirm(dst);
1349         mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1350         if (mtu >= dst_mtu(dst))
1351                 return;
1352
1353         if (!rt6_cache_allowed_for_pmtu(rt6)) {
1354                 rt6_do_update_pmtu(rt6, mtu);
1355         } else {
1356                 const struct in6_addr *daddr, *saddr;
1357                 struct rt6_info *nrt6;
1358
1359                 if (iph) {
1360                         daddr = &iph->daddr;
1361                         saddr = &iph->saddr;
1362                 } else if (sk) {
1363                         daddr = &sk->sk_v6_daddr;
1364                         saddr = &inet6_sk(sk)->saddr;
1365                 } else {
1366                         return;
1367                 }
1368                 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
1369                 if (nrt6) {
1370                         rt6_do_update_pmtu(nrt6, mtu);
1371
1372                         /* ip6_ins_rt(nrt6) will bump the
1373                          * rt6->rt6i_node->fn_sernum
1374                          * which will fail the next rt6_check() and
1375                          * invalidate the sk->sk_dst_cache.
1376                          */
1377                         ip6_ins_rt(nrt6);
1378                 }
1379         }
1380 }
1381
1382 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1383                                struct sk_buff *skb, u32 mtu)
1384 {
1385         __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
1386 }
1387
1388 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1389                      int oif, u32 mark)
1390 {
1391         const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1392         struct dst_entry *dst;
1393         struct flowi6 fl6;
1394
1395         memset(&fl6, 0, sizeof(fl6));
1396         fl6.flowi6_oif = oif;
1397         fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1398         fl6.daddr = iph->daddr;
1399         fl6.saddr = iph->saddr;
1400         fl6.flowlabel = ip6_flowinfo(iph);
1401
1402         dst = ip6_route_output(net, NULL, &fl6);
1403         if (!dst->error)
1404                 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
1405         dst_release(dst);
1406 }
1407 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1408
1409 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1410 {
1411         ip6_update_pmtu(skb, sock_net(sk), mtu,
1412                         sk->sk_bound_dev_if, sk->sk_mark);
1413 }
1414 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1415
1416 /* Handle redirects */
1417 struct ip6rd_flowi {
1418         struct flowi6 fl6;
1419         struct in6_addr gateway;
1420 };
1421
1422 static struct rt6_info *__ip6_route_redirect(struct net *net,
1423                                              struct fib6_table *table,
1424                                              struct flowi6 *fl6,
1425                                              int flags)
1426 {
1427         struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1428         struct rt6_info *rt;
1429         struct fib6_node *fn;
1430
1431         /* Get the "current" route for this destination and
1432          * check if the redirect has come from the appropriate router.
1433          *
1434          * RFC 4861 specifies that redirects should only be
1435          * accepted if they come from the nexthop to the target.
1436          * Due to the way the routes are chosen, this notion
1437          * is a bit fuzzy and one might need to check all possible
1438          * routes.
1439          */
1440
1441         read_lock_bh(&table->tb6_lock);
1442         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1443 restart:
1444         for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1445                 if (rt6_check_expired(rt))
1446                         continue;
1447                 if (rt->dst.error)
1448                         break;
1449                 if (!(rt->rt6i_flags & RTF_GATEWAY))
1450                         continue;
1451                 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1452                         continue;
1453                 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1454                         continue;
1455                 break;
1456         }
1457
1458         if (!rt)
1459                 rt = net->ipv6.ip6_null_entry;
1460         else if (rt->dst.error) {
1461                 rt = net->ipv6.ip6_null_entry;
1462                 goto out;
1463         }
1464
1465         if (rt == net->ipv6.ip6_null_entry) {
1466                 fn = fib6_backtrack(fn, &fl6->saddr);
1467                 if (fn)
1468                         goto restart;
1469         }
1470
1471 out:
1472         dst_hold(&rt->dst);
1473
1474         read_unlock_bh(&table->tb6_lock);
1475
1476         return rt;
1477 };
1478
1479 static struct dst_entry *ip6_route_redirect(struct net *net,
1480                                         const struct flowi6 *fl6,
1481                                         const struct in6_addr *gateway)
1482 {
1483         int flags = RT6_LOOKUP_F_HAS_SADDR;
1484         struct ip6rd_flowi rdfl;
1485
1486         rdfl.fl6 = *fl6;
1487         rdfl.gateway = *gateway;
1488
1489         return fib6_rule_lookup(net, &rdfl.fl6,
1490                                 flags, __ip6_route_redirect);
1491 }
1492
1493 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1494 {
1495         const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1496         struct dst_entry *dst;
1497         struct flowi6 fl6;
1498
1499         memset(&fl6, 0, sizeof(fl6));
1500         fl6.flowi6_iif = LOOPBACK_IFINDEX;
1501         fl6.flowi6_oif = oif;
1502         fl6.flowi6_mark = mark;
1503         fl6.daddr = iph->daddr;
1504         fl6.saddr = iph->saddr;
1505         fl6.flowlabel = ip6_flowinfo(iph);
1506
1507         dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1508         rt6_do_redirect(dst, NULL, skb);
1509         dst_release(dst);
1510 }
1511 EXPORT_SYMBOL_GPL(ip6_redirect);
1512
1513 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1514                             u32 mark)
1515 {
1516         const struct ipv6hdr *iph = ipv6_hdr(skb);
1517         const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1518         struct dst_entry *dst;
1519         struct flowi6 fl6;
1520
1521         memset(&fl6, 0, sizeof(fl6));
1522         fl6.flowi6_iif = LOOPBACK_IFINDEX;
1523         fl6.flowi6_oif = oif;
1524         fl6.flowi6_mark = mark;
1525         fl6.daddr = msg->dest;
1526         fl6.saddr = iph->daddr;
1527
1528         dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1529         rt6_do_redirect(dst, NULL, skb);
1530         dst_release(dst);
1531 }
1532
1533 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1534 {
1535         ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1536 }
1537 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1538
1539 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1540 {
1541         struct net_device *dev = dst->dev;
1542         unsigned int mtu = dst_mtu(dst);
1543         struct net *net = dev_net(dev);
1544
1545         mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1546
1547         if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1548                 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1549
1550         /*
1551          * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1552          * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1553          * IPV6_MAXPLEN is also valid and means: "any MSS,
1554          * rely only on pmtu discovery"
1555          */
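        /* Worked example (assuming a standard 1500-byte Ethernet MTU):
         * advmss = 1500 - sizeof(ipv6hdr) - sizeof(tcphdr) = 1500 - 40 - 20
         * = 1440, which is well below IPV6_MAXPLEN - 20, so 1440 is
         * advertised as-is.
         */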
1556         if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1557                 mtu = IPV6_MAXPLEN;
1558         return mtu;
1559 }
1560
1561 static unsigned int ip6_mtu(const struct dst_entry *dst)
1562 {
1563         const struct rt6_info *rt = (const struct rt6_info *)dst;
1564         unsigned int mtu = rt->rt6i_pmtu;
1565         struct inet6_dev *idev;
1566
1567         if (mtu)
1568                 goto out;
1569
1570         mtu = dst_metric_raw(dst, RTAX_MTU);
1571         if (mtu)
1572                 goto out;
1573
1574         mtu = IPV6_MIN_MTU;
1575
1576         rcu_read_lock();
1577         idev = __in6_dev_get(dst->dev);
1578         if (idev)
1579                 mtu = idev->cnf.mtu6;
1580         rcu_read_unlock();
1581
1582 out:
1583         return min_t(unsigned int, mtu, IP6_MAX_MTU);
1584 }
1585
1586 static struct dst_entry *icmp6_dst_gc_list;
1587 static DEFINE_SPINLOCK(icmp6_dst_lock);
1588
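/* icmp6_dst_alloc - allocate a dst for sending an ICMPv6 packet.  The route
 * is not inserted into the FIB; it is chained onto icmp6_dst_gc_list and
 * reclaimed by icmp6_dst_gc() once its refcount drops to zero.  Returns an
 * ERR_PTR() on failure.
 */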
1589 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1590                                   struct flowi6 *fl6)
1591 {
1592         struct dst_entry *dst;
1593         struct rt6_info *rt;
1594         struct inet6_dev *idev = in6_dev_get(dev);
1595         struct net *net = dev_net(dev);
1596
1597         if (unlikely(!idev))
1598                 return ERR_PTR(-ENODEV);
1599
1600         rt = ip6_dst_alloc(net, dev, 0);
1601         if (unlikely(!rt)) {
1602                 in6_dev_put(idev);
1603                 dst = ERR_PTR(-ENOMEM);
1604                 goto out;
1605         }
1606
1607         rt->dst.flags |= DST_HOST;
1608         rt->dst.output  = ip6_output;
1609         atomic_set(&rt->dst.__refcnt, 1);
1610         rt->rt6i_gateway  = fl6->daddr;
1611         rt->rt6i_dst.addr = fl6->daddr;
1612         rt->rt6i_dst.plen = 128;
1613         rt->rt6i_idev     = idev;
1614         dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1615
1616         spin_lock_bh(&icmp6_dst_lock);
1617         rt->dst.next = icmp6_dst_gc_list;
1618         icmp6_dst_gc_list = &rt->dst;
1619         spin_unlock_bh(&icmp6_dst_lock);
1620
1621         fib6_force_start_gc(net);
1622
1623         dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1624
1625 out:
1626         return dst;
1627 }
1628
1629 int icmp6_dst_gc(void)
1630 {
1631         struct dst_entry *dst, **pprev;
1632         int more = 0;
1633
1634         spin_lock_bh(&icmp6_dst_lock);
1635         pprev = &icmp6_dst_gc_list;
1636
1637         while ((dst = *pprev) != NULL) {
1638                 if (!atomic_read(&dst->__refcnt)) {
1639                         *pprev = dst->next;
1640                         dst_free(dst);
1641                 } else {
1642                         pprev = &dst->next;
1643                         ++more;
1644                 }
1645         }
1646
1647         spin_unlock_bh(&icmp6_dst_lock);
1648
1649         return more;
1650 }
1651
1652 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1653                             void *arg)
1654 {
1655         struct dst_entry *dst, **pprev;
1656
1657         spin_lock_bh(&icmp6_dst_lock);
1658         pprev = &icmp6_dst_gc_list;
1659         while ((dst = *pprev) != NULL) {
1660                 struct rt6_info *rt = (struct rt6_info *) dst;
1661                 if (func(rt, arg)) {
1662                         *pprev = dst->next;
1663                         dst_free(dst);
1664                 } else {
1665                         pprev = &dst->next;
1666                 }
1667         }
1668         spin_unlock_bh(&icmp6_dst_lock);
1669 }
1670
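/* ip6_dst_gc - dst_ops garbage collection hook for IPv6.  Runs fib6_run_gc()
 * once the minimum GC interval has elapsed or the number of cached entries
 * exceeds ip6_rt_max_size, and reports via a non-zero return whether the
 * table is still over that limit.
 */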
1671 static int ip6_dst_gc(struct dst_ops *ops)
1672 {
1673         struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1674         int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1675         int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1676         int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1677         int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1678         unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1679         int entries;
1680
1681         entries = dst_entries_get_fast(ops);
1682         if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1683             entries <= rt_max_size)
1684                 goto out;
1685
1686         net->ipv6.ip6_rt_gc_expire++;
1687         fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1688         entries = dst_entries_get_slow(ops);
1689         if (entries < ops->gc_thresh)
1690                 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1691 out:
1692         net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1693         return entries > rt_max_size;
1694 }
1695
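/* ip6_convert_metrics - convert the RTA_METRICS attributes carried in @cfg
 * into the mx6_config array used when the route is inserted.  RTAX_CC_ALGO
 * arrives as a congestion-control algorithm name and is translated to its
 * key here.  Returns 0 on success (the caller must kfree() mxc->mx) or a
 * negative errno.
 */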
1696 static int ip6_convert_metrics(struct mx6_config *mxc,
1697                                const struct fib6_config *cfg)
1698 {
1699         bool ecn_ca = false;
1700         struct nlattr *nla;
1701         int remaining;
1702         u32 *mp;
1703
1704         if (!cfg->fc_mx)
1705                 return 0;
1706
1707         mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1708         if (unlikely(!mp))
1709                 return -ENOMEM;
1710
1711         nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1712                 int type = nla_type(nla);
1713                 u32 val;
1714
1715                 if (!type)
1716                         continue;
1717                 if (unlikely(type > RTAX_MAX))
1718                         goto err;
1719
1720                 if (type == RTAX_CC_ALGO) {
1721                         char tmp[TCP_CA_NAME_MAX];
1722
1723                         nla_strlcpy(tmp, nla, sizeof(tmp));
1724                         val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
1725                         if (val == TCP_CA_UNSPEC)
1726                                 goto err;
1727                 } else {
1728                         val = nla_get_u32(nla);
1729                 }
1730                 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
1731                         goto err;
1732
1733                 mp[type - 1] = val;
1734                 __set_bit(type - 1, mxc->mx_valid);
1735         }
1736
1737         if (ecn_ca) {
1738                 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
1739                 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
1740         }
1741
1742         mxc->mx = mp;
1743         return 0;
1744  err:
1745         kfree(mp);
1746         return -EINVAL;
1747 }
1748
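/* ip6_route_info_create - allocate and initialise an rt6_info from @cfg,
 * resolving the output device, gateway and lwtunnel state, without
 * inserting it into any FIB table.  Returns the new route or an ERR_PTR();
 * the caller must insert it (__ip6_ins_rt) or free the dst itself.
 */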
1749 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
1750 {
1751         struct net *net = cfg->fc_nlinfo.nl_net;
1752         struct rt6_info *rt = NULL;
1753         struct net_device *dev = NULL;
1754         struct inet6_dev *idev = NULL;
1755         struct fib6_table *table;
1756         int addr_type;
1757         int err = -EINVAL;
1758
1759         if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1760                 goto out;
1761 #ifndef CONFIG_IPV6_SUBTREES
1762         if (cfg->fc_src_len)
1763                 goto out;
1764 #endif
1765         if (cfg->fc_ifindex) {
1766                 err = -ENODEV;
1767                 dev = dev_get_by_index(net, cfg->fc_ifindex);
1768                 if (!dev)
1769                         goto out;
1770                 idev = in6_dev_get(dev);
1771                 if (!idev)
1772                         goto out;
1773         }
1774
1775         if (cfg->fc_metric == 0)
1776                 cfg->fc_metric = IP6_RT_PRIO_USER;
1777
1778         err = -ENOBUFS;
1779         if (cfg->fc_nlinfo.nlh &&
1780             !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1781                 table = fib6_get_table(net, cfg->fc_table);
1782                 if (!table) {
1783                         pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1784                         table = fib6_new_table(net, cfg->fc_table);
1785                 }
1786         } else {
1787                 table = fib6_new_table(net, cfg->fc_table);
1788         }
1789
1790         if (!table)
1791                 goto out;
1792
1793         rt = ip6_dst_alloc(net, NULL,
1794                            (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1795
1796         if (!rt) {
1797                 err = -ENOMEM;
1798                 goto out;
1799         }
1800
1801         if (cfg->fc_flags & RTF_EXPIRES)
1802                 rt6_set_expires(rt, jiffies +
1803                                 clock_t_to_jiffies(cfg->fc_expires));
1804         else
1805                 rt6_clean_expires(rt);
1806
1807         if (cfg->fc_protocol == RTPROT_UNSPEC)
1808                 cfg->fc_protocol = RTPROT_BOOT;
1809         rt->rt6i_protocol = cfg->fc_protocol;
1810
1811         addr_type = ipv6_addr_type(&cfg->fc_dst);
1812
1813         if (addr_type & IPV6_ADDR_MULTICAST)
1814                 rt->dst.input = ip6_mc_input;
1815         else if (cfg->fc_flags & RTF_LOCAL)
1816                 rt->dst.input = ip6_input;
1817         else
1818                 rt->dst.input = ip6_forward;
1819
1820         rt->dst.output = ip6_output;
1821
1822         if (cfg->fc_encap) {
1823                 struct lwtunnel_state *lwtstate;
1824
1825                 err = lwtunnel_build_state(dev, cfg->fc_encap_type,
1826                                            cfg->fc_encap, AF_INET6, cfg,
1827                                            &lwtstate);
1828                 if (err)
1829                         goto out;
1830                 rt->dst.lwtstate = lwtstate_get(lwtstate);
1831                 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
1832                         rt->dst.lwtstate->orig_output = rt->dst.output;
1833                         rt->dst.output = lwtunnel_output;
1834                 }
1835                 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
1836                         rt->dst.lwtstate->orig_input = rt->dst.input;
1837                         rt->dst.input = lwtunnel_input;
1838                 }
1839         }
1840
1841         ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1842         rt->rt6i_dst.plen = cfg->fc_dst_len;
1843         if (rt->rt6i_dst.plen == 128)
1844                 rt->dst.flags |= DST_HOST;
1845
1846 #ifdef CONFIG_IPV6_SUBTREES
1847         ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1848         rt->rt6i_src.plen = cfg->fc_src_len;
1849 #endif
1850
1851         rt->rt6i_metric = cfg->fc_metric;
1852
1853         /* We cannot add true routes via loopback here,
1854            they would result in kernel looping; promote them to reject routes
1855          */
1856         if ((cfg->fc_flags & RTF_REJECT) ||
1857             (dev && (dev->flags & IFF_LOOPBACK) &&
1858              !(addr_type & IPV6_ADDR_LOOPBACK) &&
1859              !(cfg->fc_flags & RTF_LOCAL))) {
1860                 /* hold loopback dev/idev if we haven't done so. */
1861                 if (dev != net->loopback_dev) {
1862                         if (dev) {
1863                                 dev_put(dev);
1864                                 in6_dev_put(idev);
1865                         }
1866                         dev = net->loopback_dev;
1867                         dev_hold(dev);
1868                         idev = in6_dev_get(dev);
1869                         if (!idev) {
1870                                 err = -ENODEV;
1871                                 goto out;
1872                         }
1873                 }
1874                 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1875                 switch (cfg->fc_type) {
1876                 case RTN_BLACKHOLE:
1877                         rt->dst.error = -EINVAL;
1878                         rt->dst.output = dst_discard_out;
1879                         rt->dst.input = dst_discard;
1880                         break;
1881                 case RTN_PROHIBIT:
1882                         rt->dst.error = -EACCES;
1883                         rt->dst.output = ip6_pkt_prohibit_out;
1884                         rt->dst.input = ip6_pkt_prohibit;
1885                         break;
1886                 case RTN_THROW:
1887                 case RTN_UNREACHABLE:
1888                 default:
1889                         rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1890                                         : (cfg->fc_type == RTN_UNREACHABLE)
1891                                         ? -EHOSTUNREACH : -ENETUNREACH;
1892                         rt->dst.output = ip6_pkt_discard_out;
1893                         rt->dst.input = ip6_pkt_discard;
1894                         break;
1895                 }
1896                 goto install_route;
1897         }
1898
1899         if (cfg->fc_flags & RTF_GATEWAY) {
1900                 const struct in6_addr *gw_addr;
1901                 int gwa_type;
1902
1903                 gw_addr = &cfg->fc_gateway;
1904                 gwa_type = ipv6_addr_type(gw_addr);
1905
1906                 /* if gw_addr is local we will fail to detect this in case
1907                  * the address is still TENTATIVE (DAD in progress). rt6_lookup()
1908                  * will return the already-added prefix route via the interface
1909                  * that the prefix route was assigned to, which might not be loopback.
1910                  */
1911                 err = -EINVAL;
1912                 if (ipv6_chk_addr_and_flags(net, gw_addr,
1913                                             gwa_type & IPV6_ADDR_LINKLOCAL ?
1914                                             dev : NULL, 0, 0))
1915                         goto out;
1916
1917                 rt->rt6i_gateway = *gw_addr;
1918
1919                 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1920                         struct rt6_info *grt;
1921
1922                         /* IPv6 strictly prohibits using non-link-local
1923                            addresses as a nexthop address; otherwise, the
1924                            router would not be able to send redirects.
1925                            That is generally desirable, but in some (rare!)
1926                            circumstances (SIT, PtP, NBMA NOARP links) it is
1927                            handy to allow some exceptions. --ANK
1928                          */
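                        /* Illustration (hypothetical, documentation-prefix
                         * addresses): "ip -6 route add 2001:db8::/32 via
                         * 2001:db8:1::1" is only accepted if 2001:db8:1::1 is
                         * itself covered by an existing non-gateway (on-link)
                         * route, which the rt6_lookup() below verifies.
                         */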
1929                         if (!(gwa_type & IPV6_ADDR_UNICAST))
1930                                 goto out;
1931
1932                         grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1933
1934                         err = -EHOSTUNREACH;
1935                         if (!grt)
1936                                 goto out;
1937                         if (dev) {
1938                                 if (dev != grt->dst.dev) {
1939                                         ip6_rt_put(grt);
1940                                         goto out;
1941                                 }
1942                         } else {
1943                                 dev = grt->dst.dev;
1944                                 idev = grt->rt6i_idev;
1945                                 dev_hold(dev);
1946                                 in6_dev_hold(grt->rt6i_idev);
1947                         }
1948                         if (!(grt->rt6i_flags & RTF_GATEWAY))
1949                                 err = 0;
1950                         ip6_rt_put(grt);
1951
1952                         if (err)
1953                                 goto out;
1954                 }
1955                 err = -EINVAL;
1956                 if (!dev || (dev->flags & IFF_LOOPBACK))
1957                         goto out;
1958         }
1959
1960         err = -ENODEV;
1961         if (!dev)
1962                 goto out;
1963
1964         if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1965                 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1966                         err = -EINVAL;
1967                         goto out;
1968                 }
1969                 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1970                 rt->rt6i_prefsrc.plen = 128;
1971         } else
1972                 rt->rt6i_prefsrc.plen = 0;
1973
1974         rt->rt6i_flags = cfg->fc_flags;
1975
1976 install_route:
1977         rt->dst.dev = dev;
1978         rt->rt6i_idev = idev;
1979         rt->rt6i_table = table;
1980
1981         cfg->fc_nlinfo.nl_net = dev_net(dev);
1982
1983         return rt;
1984 out:
1985         if (dev)
1986                 dev_put(dev);
1987         if (idev)
1988                 in6_dev_put(idev);
1989         if (rt)
1990                 dst_free(&rt->dst);
1991
1992         return ERR_PTR(err);
1993 }
1994
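/* ip6_route_add - build an rt6_info from @cfg, convert any RTA_METRICS
 * attributes, and insert the route into the FIB via __ip6_ins_rt().
 */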
1995 int ip6_route_add(struct fib6_config *cfg)
1996 {
1997         struct mx6_config mxc = { .mx = NULL, };
1998         struct rt6_info *rt;
1999         int err;
2000
2001         rt = ip6_route_info_create(cfg);
2002         if (IS_ERR(rt)) {
2003                 err = PTR_ERR(rt);
2004                 rt = NULL;
2005                 goto out;
2006         }
2007
2008         err = ip6_convert_metrics(&mxc, cfg);
2009         if (err)
2010                 goto out;
2011
2012         err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
2013
2014         kfree(mxc.mx);
2015
2016         return err;
2017 out:
2018         if (rt)
2019                 dst_free(&rt->dst);
2020
2021         return err;
2022 }
2023
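/* __ip6_del_rt - remove @rt from its FIB table; the caller's reference on
 * @rt is always dropped, even when the deletion fails.
 */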
2024 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2025 {
2026         int err;
2027         struct fib6_table *table;
2028         struct net *net = dev_net(rt->dst.dev);
2029
2030         if (rt == net->ipv6.ip6_null_entry ||
2031             rt->dst.flags & DST_NOCACHE) {
2032                 err = -ENOENT;
2033                 goto out;
2034         }
2035
2036         table = rt->rt6i_table;
2037         write_lock_bh(&table->tb6_lock);
2038         err = fib6_del(rt, info);
2039         write_unlock_bh(&table->tb6_lock);
2040
2041 out:
2042         ip6_rt_put(rt);
2043         return err;
2044 }
2045
2046 int ip6_del_rt(struct rt6_info *rt)
2047 {
2048         struct nl_info info = {
2049                 .nl_net = dev_net(rt->dst.dev),
2050         };
2051         return __ip6_del_rt(rt, &info);
2052 }
2053
2054 static int ip6_route_del(struct fib6_config *cfg)
2055 {
2056         struct fib6_table *table;
2057         struct fib6_node *fn;
2058         struct rt6_info *rt;
2059         int err = -ESRCH;
2060
2061         table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2062         if (!table)
2063                 return err;
2064
2065         read_lock_bh(&table->tb6_lock);
2066
2067         fn = fib6_locate(&table->tb6_root,
2068                          &cfg->fc_dst, cfg->fc_dst_len,
2069                          &cfg->fc_src, cfg->fc_src_len);
2070
2071         if (fn) {
2072                 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2073                         if ((rt->rt6i_flags & RTF_CACHE) &&
2074                             !(cfg->fc_flags & RTF_CACHE))
2075                                 continue;
2076                         if (cfg->fc_ifindex &&
2077                             (!rt->dst.dev ||
2078                              rt->dst.dev->ifindex != cfg->fc_ifindex))
2079                                 continue;
2080                         if (cfg->fc_flags & RTF_GATEWAY &&
2081                             !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2082                                 continue;
2083                         if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2084                                 continue;
2085                         dst_hold(&rt->dst);
2086                         read_unlock_bh(&table->tb6_lock);
2087
2088                         return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2089                 }
2090         }
2091         read_unlock_bh(&table->tb6_lock);
2092
2093         return err;
2094 }
2095
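/* rt6_do_redirect - validate a received ICMPv6 Redirect and act on it:
 * confirm the current dst, update the neighbour entry for the new first
 * hop, and install an RTF_CACHE clone pointing at the redirect target,
 * removing the old cached entry for the destination if one exists.
 */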
2096 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2097 {
2098         struct netevent_redirect netevent;
2099         struct rt6_info *rt, *nrt = NULL;
2100         struct ndisc_options ndopts;
2101         struct inet6_dev *in6_dev;
2102         struct neighbour *neigh;
2103         struct rd_msg *msg;
2104         int optlen, on_link;
2105         u8 *lladdr;
2106
2107         optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2108         optlen -= sizeof(*msg);
2109
2110         if (optlen < 0) {
2111                 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2112                 return;
2113         }
2114
2115         msg = (struct rd_msg *)icmp6_hdr(skb);
2116
2117         if (ipv6_addr_is_multicast(&msg->dest)) {
2118                 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2119                 return;
2120         }
2121
2122         on_link = 0;
2123         if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2124                 on_link = 1;
2125         } else if (ipv6_addr_type(&msg->target) !=
2126                    (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2127                 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2128                 return;
2129         }
2130
2131         in6_dev = __in6_dev_get(skb->dev);
2132         if (!in6_dev)
2133                 return;
2134         if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2135                 return;
2136
2137         /* RFC2461 8.1:
2138          *      The IP source address of the Redirect MUST be the same as the current
2139          *      first-hop router for the specified ICMP Destination Address.
2140          */
2141
2142         if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
2143                 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2144                 return;
2145         }
2146
2147         lladdr = NULL;
2148         if (ndopts.nd_opts_tgt_lladdr) {
2149                 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2150                                              skb->dev);
2151                 if (!lladdr) {
2152                         net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2153                         return;
2154                 }
2155         }
2156
2157         rt = (struct rt6_info *) dst;
2158         if (rt->rt6i_flags & RTF_REJECT) {
2159                 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2160                 return;
2161         }
2162
2163         /* Redirect received -> path was valid.
2164          * Redirects are sent only in response to data packets,
2165          * so this nexthop is apparently reachable. --ANK
2166          */
2167         dst_confirm(&rt->dst);
2168
2169         neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2170         if (!neigh)
2171                 return;
2172
2173         /*
2174          *      We have finally decided to accept it.
2175          */
2176
2177         neigh_update(neigh, lladdr, NUD_STALE,
2178                      NEIGH_UPDATE_F_WEAK_OVERRIDE|
2179                      NEIGH_UPDATE_F_OVERRIDE|
2180                      (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2181                                      NEIGH_UPDATE_F_ISROUTER))
2182                      );
2183
2184         nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2185         if (!nrt)
2186                 goto out;
2187
2188         nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
2189         if (on_link)
2190                 nrt->rt6i_flags &= ~RTF_GATEWAY;
2191
2192         nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
2193
2194         if (ip6_ins_rt(nrt))
2195                 goto out;
2196
2197         netevent.old = &rt->dst;
2198         netevent.new = &nrt->dst;
2199         netevent.daddr = &msg->dest;
2200         netevent.neigh = neigh;
2201         call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
2202
2203         if (rt->rt6i_flags & RTF_CACHE) {
2204                 rt = (struct rt6_info *) dst_clone(&rt->dst);
2205                 ip6_del_rt(rt);
2206         }
2207
2208 out:
2209         neigh_release(neigh);
2210 }
2211
2212 /*
2213  *      Misc support functions
2214  */
2215
2216 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2217 {
2218         BUG_ON(from->dst.from);
2219
2220         rt->rt6i_flags &= ~RTF_EXPIRES;
2221         dst_hold(&from->dst);
2222         rt->dst.from = &from->dst;
2223         dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
2224 }
2225
2226 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2227 {
2228         rt->dst.input = ort->dst.input;
2229         rt->dst.output = ort->dst.output;
2230         rt->rt6i_dst = ort->rt6i_dst;
2231         rt->dst.error = ort->dst.error;
2232         rt->rt6i_idev = ort->rt6i_idev;
2233         if (rt->rt6i_idev)
2234                 in6_dev_hold(rt->rt6i_idev);
2235         rt->dst.lastuse = jiffies;
2236         rt->rt6i_gateway = ort->rt6i_gateway;
2237         rt->rt6i_flags = ort->rt6i_flags;
2238         rt6_set_from(rt, ort);
2239         rt->rt6i_metric = ort->rt6i_metric;
2240 #ifdef CONFIG_IPV6_SUBTREES
2241         rt->rt6i_src = ort->rt6i_src;
2242 #endif
2243         rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2244         rt->rt6i_table = ort->rt6i_table;
2245         rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
2246 }
2247
2248 #ifdef CONFIG_IPV6_ROUTE_INFO
2249 static struct rt6_info *rt6_get_route_info(struct net *net,
2250                                            const struct in6_addr *prefix, int prefixlen,
2251                                            const struct in6_addr *gwaddr, int ifindex)
2252 {
2253         struct fib6_node *fn;
2254         struct rt6_info *rt = NULL;
2255         struct fib6_table *table;
2256
2257         table = fib6_get_table(net, RT6_TABLE_INFO);
2258         if (!table)
2259                 return NULL;
2260
2261         read_lock_bh(&table->tb6_lock);
2262         fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2263         if (!fn)
2264                 goto out;
2265
2266         for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2267                 if (rt->dst.dev->ifindex != ifindex)
2268                         continue;
2269                 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2270                         continue;
2271                 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2272                         continue;
2273                 dst_hold(&rt->dst);
2274                 break;
2275         }
2276 out:
2277         read_unlock_bh(&table->tb6_lock);
2278         return rt;
2279 }
2280
2281 static struct rt6_info *rt6_add_route_info(struct net *net,
2282                                            const struct in6_addr *prefix, int prefixlen,
2283                                            const struct in6_addr *gwaddr, int ifindex,
2284                                            unsigned int pref)
2285 {
2286         struct fib6_config cfg = {
2287                 .fc_metric      = IP6_RT_PRIO_USER,
2288                 .fc_ifindex     = ifindex,
2289                 .fc_dst_len     = prefixlen,
2290                 .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2291                                   RTF_UP | RTF_PREF(pref),
2292                 .fc_nlinfo.portid = 0,
2293                 .fc_nlinfo.nlh = NULL,
2294                 .fc_nlinfo.nl_net = net,
2295         };
2296
2297         cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
2298         cfg.fc_dst = *prefix;
2299         cfg.fc_gateway = *gwaddr;
2300
2301         /* We should treat it as a default route if prefix length is 0. */
2302         if (!prefixlen)
2303                 cfg.fc_flags |= RTF_DEFAULT;
2304
2305         ip6_route_add(&cfg);
2306
2307         return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
2308 }
2309 #endif
2310
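/* rt6_get_dflt_router - find the RA-learned default route through gateway
 * @addr on @dev in the default-route table, taking a reference on it if
 * found.
 */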
2311 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2312 {
2313         struct rt6_info *rt;
2314         struct fib6_table *table;
2315
2316         table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
2317         if (!table)
2318                 return NULL;
2319
2320         read_lock_bh(&table->tb6_lock);
2321         for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2322                 if (dev == rt->dst.dev &&
2323                     ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2324                     ipv6_addr_equal(&rt->rt6i_gateway, addr))
2325                         break;
2326         }
2327         if (rt)
2328                 dst_hold(&rt->dst);
2329         read_unlock_bh(&table->tb6_lock);
2330         return rt;
2331 }
2332
2333 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2334                                      struct net_device *dev,
2335                                      unsigned int pref)
2336 {
2337         struct fib6_config cfg = {
2338                 .fc_table       = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
2339                 .fc_metric      = IP6_RT_PRIO_USER,
2340                 .fc_ifindex     = dev->ifindex,
2341                 .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2342                                   RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2343                 .fc_nlinfo.portid = 0,
2344                 .fc_nlinfo.nlh = NULL,
2345                 .fc_nlinfo.nl_net = dev_net(dev),
2346         };
2347
2348         cfg.fc_gateway = *gwaddr;
2349
2350         ip6_route_add(&cfg);
2351
2352         return rt6_get_dflt_router(gwaddr, dev);
2353 }
2354
2355 void rt6_purge_dflt_routers(struct net *net)
2356 {
2357         struct rt6_info *rt;
2358         struct fib6_table *table;
2359
2360         /* NOTE: Keep consistent with rt6_get_dflt_router */
2361         table = fib6_get_table(net, RT6_TABLE_DFLT);
2362         if (!table)
2363                 return;
2364
2365 restart:
2366         read_lock_bh(&table->tb6_lock);
2367         for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2368                 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2369                     (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2370                         dst_hold(&rt->dst);
2371                         read_unlock_bh(&table->tb6_lock);
2372                         ip6_del_rt(rt);
2373                         goto restart;
2374                 }
2375         }
2376         read_unlock_bh(&table->tb6_lock);
2377 }
2378
2379 static void rtmsg_to_fib6_config(struct net *net,
2380                                  struct in6_rtmsg *rtmsg,
2381                                  struct fib6_config *cfg)
2382 {
2383         memset(cfg, 0, sizeof(*cfg));
2384
2385         cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2386                          : RT6_TABLE_MAIN;
2387         cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2388         cfg->fc_metric = rtmsg->rtmsg_metric;
2389         cfg->fc_expires = rtmsg->rtmsg_info;
2390         cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2391         cfg->fc_src_len = rtmsg->rtmsg_src_len;
2392         cfg->fc_flags = rtmsg->rtmsg_flags;
2393
2394         cfg->fc_nlinfo.nl_net = net;
2395
2396         cfg->fc_dst = rtmsg->rtmsg_dst;
2397         cfg->fc_src = rtmsg->rtmsg_src;
2398         cfg->fc_gateway = rtmsg->rtmsg_gateway;
2399 }
2400
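/* ipv6_route_ioctl - handle the legacy SIOCADDRT/SIOCDELRT ioctls: copy the
 * in6_rtmsg from userspace, convert it to a fib6_config, and call
 * ip6_route_add()/ip6_route_del() under the RTNL lock.
 */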
2401 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2402 {
2403         struct fib6_config cfg;
2404         struct in6_rtmsg rtmsg;
2405         int err;
2406
2407         switch (cmd) {
2408         case SIOCADDRT:         /* Add a route */
2409         case SIOCDELRT:         /* Delete a route */
2410                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2411                         return -EPERM;
2412                 err = copy_from_user(&rtmsg, arg,
2413                                      sizeof(struct in6_rtmsg));
2414                 if (err)
2415                         return -EFAULT;
2416
2417                 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2418
2419                 rtnl_lock();
2420                 switch (cmd) {
2421                 case SIOCADDRT:
2422                         err = ip6_route_add(&cfg);
2423                         break;
2424                 case SIOCDELRT:
2425                         err = ip6_route_del(&cfg);
2426                         break;
2427                 default:
2428                         err = -EINVAL;
2429                 }
2430                 rtnl_unlock();
2431
2432                 return err;
2433         }
2434
2435         return -EINVAL;
2436 }
2437
2438 /*
2439  *      Drop the packet on the floor
2440  */
2441
2442 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2443 {
2444         int type;
2445         struct dst_entry *dst = skb_dst(skb);
2446         switch (ipstats_mib_noroutes) {
2447         case IPSTATS_MIB_INNOROUTES:
2448                 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2449                 if (type == IPV6_ADDR_ANY) {
2450                         IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2451                                       IPSTATS_MIB_INADDRERRORS);
2452                         break;
2453                 }
2454                 /* FALLTHROUGH */
2455         case IPSTATS_MIB_OUTNOROUTES:
2456                 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2457                               ipstats_mib_noroutes);
2458                 break;
2459         }
2460         icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2461         kfree_skb(skb);
2462         return 0;
2463 }
2464
2465 static int ip6_pkt_discard(struct sk_buff *skb)
2466 {
2467         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2468 }
2469
2470 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2471 {
2472         skb->dev = skb_dst(skb)->dev;
2473         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2474 }
2475
2476 static int ip6_pkt_prohibit(struct sk_buff *skb)
2477 {
2478         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2479 }
2480
2481 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2482 {
2483         skb->dev = skb_dst(skb)->dev;
2484         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2485 }
2486
2487 /*
2488  *      Allocate a dst for local (unicast / anycast) address.
2489  */
2490
2491 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2492                                     const struct in6_addr *addr,
2493                                     bool anycast)
2494 {
2495         u32 tb_id;
2496         struct net *net = dev_net(idev->dev);
2497         struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2498                                             DST_NOCOUNT);
2499         if (!rt)
2500                 return ERR_PTR(-ENOMEM);
2501
2502         in6_dev_hold(idev);
2503
2504         rt->dst.flags |= DST_HOST;
2505         rt->dst.input = ip6_input;
2506         rt->dst.output = ip6_output;
2507         rt->rt6i_idev = idev;
2508
2509         rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2510         if (anycast)
2511                 rt->rt6i_flags |= RTF_ANYCAST;
2512         else
2513                 rt->rt6i_flags |= RTF_LOCAL;
2514
2515         rt->rt6i_gateway  = *addr;
2516         rt->rt6i_dst.addr = *addr;
2517         rt->rt6i_dst.plen = 128;
2518         tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
2519         rt->rt6i_table = fib6_get_table(net, tb_id);
2520         rt->dst.flags |= DST_NOCACHE;
2521
2522         atomic_set(&rt->dst.__refcnt, 1);
2523
2524         return rt;
2525 }
2526
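/* ip6_route_get_saddr - choose a source address for @daddr: use the route's
 * configured prefsrc when set, otherwise fall back to regular source
 * address selection on the route's device.
 */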
2527 int ip6_route_get_saddr(struct net *net,
2528                         struct rt6_info *rt,
2529                         const struct in6_addr *daddr,
2530                         unsigned int prefs,
2531                         struct in6_addr *saddr)
2532 {
2533         struct inet6_dev *idev =
2534                 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
2535         int err = 0;
2536         if (rt && rt->rt6i_prefsrc.plen)
2537                 *saddr = rt->rt6i_prefsrc.addr;
2538         else
2539                 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2540                                          daddr, prefs, saddr);
2541         return err;
2542 }
2543
2544 /* remove deleted ip from prefsrc entries */
2545 struct arg_dev_net_ip {
2546         struct net_device *dev;
2547         struct net *net;
2548         struct in6_addr *addr;
2549 };
2550
2551 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2552 {
2553         struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2554         struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2555         struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2556
2557         if (((void *)rt->dst.dev == dev || !dev) &&
2558             rt != net->ipv6.ip6_null_entry &&
2559             ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2560                 /* remove prefsrc entry */
2561                 rt->rt6i_prefsrc.plen = 0;
2562         }
2563         return 0;
2564 }
2565
2566 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2567 {
2568         struct net *net = dev_net(ifp->idev->dev);
2569         struct arg_dev_net_ip adni = {
2570                 .dev = ifp->idev->dev,
2571                 .net = net,
2572                 .addr = &ifp->addr,
2573         };
2574         fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2575 }
2576
2577 #define RTF_RA_ROUTER           (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2578 #define RTF_CACHE_GATEWAY       (RTF_GATEWAY | RTF_CACHE)
2579
2580 /* Remove routers and update dst entries when the gateway turns into a host. */
2581 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2582 {
2583         struct in6_addr *gateway = (struct in6_addr *)arg;
2584
2585         if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2586              ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2587              ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2588                 return -1;
2589         }
2590         return 0;
2591 }
2592
2593 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2594 {
2595         fib6_clean_all(net, fib6_clean_tohost, gateway);
2596 }
2597
2598 struct arg_dev_net {
2599         struct net_device *dev;
2600         struct net *net;
2601 };
2602
2603 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2604 {
2605         const struct arg_dev_net *adn = arg;
2606         const struct net_device *dev = adn->dev;
2607
2608         if ((rt->dst.dev == dev || !dev) &&
2609             rt != adn->net->ipv6.ip6_null_entry)
2610                 return -1;
2611
2612         return 0;
2613 }
2614
2615 void rt6_ifdown(struct net *net, struct net_device *dev)
2616 {
2617         struct arg_dev_net adn = {
2618                 .dev = dev,
2619                 .net = net,
2620         };
2621
2622         fib6_clean_all(net, fib6_ifdown, &adn);
2623         icmp6_clean_all(fib6_ifdown, &adn);
2624         if (dev)
2625                 rt6_uncached_list_flush_dev(net, dev);
2626 }
2627
2628 struct rt6_mtu_change_arg {
2629         struct net_device *dev;
2630         unsigned int mtu;
2631 };
2632
2633 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2634 {
2635         struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2636         struct inet6_dev *idev;
2637
2638         /* In IPv6, PMTU discovery is not optional,
2639            so the RTAX_MTU lock cannot disable it.
2640            We still use this lock to block changes
2641            caused by addrconf/ndisc.
2642         */
2643
2644         idev = __in6_dev_get(arg->dev);
2645         if (!idev)
2646                 return 0;
2647
2648         /* For an administrative MTU increase, there is no way to discover
2649            an IPv6 PMTU increase, so the PMTU must be updated here.
2650            Since RFC 1981 doesn't cover administrative MTU increases,
2651            updating the PMTU on increase is a MUST (e.g. jumbo frames).
2652          */
2653         /*
2654            If the new MTU is less than the route PMTU, the new MTU will be
2655            the lowest MTU in the path; update the route PMTU to reflect the
2656            decrease.  If the new MTU is greater than the route PMTU, and the
2657            old MTU was the lowest MTU in the path, update the route PMTU to
2658            reflect the increase.  If another node on the path still has the
2659            lowest MTU, a Packet Too Big message will trigger PMTU discovery
2660            again.
2661          */
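        /* Example (hypothetical numbers): the device MTU is raised from 1500
         * to 9000.  A cached route whose rt6i_pmtu was clamped to 1280 by a
         * Packet Too Big message is left untouched (1280 < 9000), while
         * non-cached routes that were simply tracking the device MTU are
         * raised to 9000 by the dst_metric_set() below.
         */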
2662         if (rt->dst.dev == arg->dev &&
2663             !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2664                 if (rt->rt6i_flags & RTF_CACHE) {
2665                         /* For RTF_CACHE with rt6i_pmtu == 0
2666                          * (i.e. a redirected route),
2667                          * the metrics of its rt->dst.from has already
2668                          * been updated.
2669                          */
2670                         if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
2671                                 rt->rt6i_pmtu = arg->mtu;
2672                 } else if (dst_mtu(&rt->dst) >= arg->mtu ||
2673                            (dst_mtu(&rt->dst) < arg->mtu &&
2674                             dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
2675                         dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2676                 }
2677         }
2678         return 0;
2679 }
2680
2681 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2682 {
2683         struct rt6_mtu_change_arg arg = {
2684                 .dev = dev,
2685                 .mtu = mtu,
2686         };
2687
2688         fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2689 }
2690
2691 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2692         [RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
2693         [RTA_OIF]               = { .type = NLA_U32 },
2694         [RTA_IIF]               = { .type = NLA_U32 },
2695         [RTA_PRIORITY]          = { .type = NLA_U32 },
2696         [RTA_METRICS]           = { .type = NLA_NESTED },
2697         [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
2698         [RTA_PREF]              = { .type = NLA_U8 },
2699         [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
2700         [RTA_ENCAP]             = { .type = NLA_NESTED },
2701 };
2702
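/* rtm_to_fib6_config - parse an RTM_NEWROUTE/RTM_DELROUTE request (validated
 * against rtm_ipv6_policy) into a fib6_config for the handlers below.
 */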
2703 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2704                               struct fib6_config *cfg)
2705 {
2706         struct rtmsg *rtm;
2707         struct nlattr *tb[RTA_MAX+1];
2708         unsigned int pref;
2709         int err;
2710
2711         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2712         if (err < 0)
2713                 goto errout;
2714
2715         err = -EINVAL;
2716         rtm = nlmsg_data(nlh);
2717         memset(cfg, 0, sizeof(*cfg));
2718
2719         cfg->fc_table = rtm->rtm_table;
2720         cfg->fc_dst_len = rtm->rtm_dst_len;
2721         cfg->fc_src_len = rtm->rtm_src_len;
2722         cfg->fc_flags = RTF_UP;
2723         cfg->fc_protocol = rtm->rtm_protocol;
2724         cfg->fc_type = rtm->rtm_type;
2725
2726         if (rtm->rtm_type == RTN_UNREACHABLE ||
2727             rtm->rtm_type == RTN_BLACKHOLE ||
2728             rtm->rtm_type == RTN_PROHIBIT ||
2729             rtm->rtm_type == RTN_THROW)
2730                 cfg->fc_flags |= RTF_REJECT;
2731
2732         if (rtm->rtm_type == RTN_LOCAL)
2733                 cfg->fc_flags |= RTF_LOCAL;
2734
2735         if (rtm->rtm_flags & RTM_F_CLONED)
2736                 cfg->fc_flags |= RTF_CACHE;
2737
2738         cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2739         cfg->fc_nlinfo.nlh = nlh;
2740         cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2741
2742         if (tb[RTA_GATEWAY]) {
2743                 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2744                 cfg->fc_flags |= RTF_GATEWAY;
2745         }
2746
2747         if (tb[RTA_DST]) {
2748                 int plen = (rtm->rtm_dst_len + 7) >> 3;
2749
2750                 if (nla_len(tb[RTA_DST]) < plen)
2751                         goto errout;
2752
2753                 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2754         }
2755
2756         if (tb[RTA_SRC]) {
2757                 int plen = (rtm->rtm_src_len + 7) >> 3;
2758
2759                 if (nla_len(tb[RTA_SRC]) < plen)
2760                         goto errout;
2761
2762                 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2763         }
2764
2765         if (tb[RTA_PREFSRC])
2766                 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2767
2768         if (tb[RTA_OIF])
2769                 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2770
2771         if (tb[RTA_PRIORITY])
2772                 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2773
2774         if (tb[RTA_METRICS]) {
2775                 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2776                 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2777         }
2778
2779         if (tb[RTA_TABLE])
2780                 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2781
2782         if (tb[RTA_MULTIPATH]) {
2783                 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2784                 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2785         }
2786
2787         if (tb[RTA_PREF]) {
2788                 pref = nla_get_u8(tb[RTA_PREF]);
2789                 if (pref != ICMPV6_ROUTER_PREF_LOW &&
2790                     pref != ICMPV6_ROUTER_PREF_HIGH)
2791                         pref = ICMPV6_ROUTER_PREF_MEDIUM;
2792                 cfg->fc_flags |= RTF_PREF(pref);
2793         }
2794
2795         if (tb[RTA_ENCAP])
2796                 cfg->fc_encap = tb[RTA_ENCAP];
2797
2798         if (tb[RTA_ENCAP_TYPE])
2799                 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
2800
2801         err = 0;
2802 errout:
2803         return err;
2804 }
2805
2806 struct rt6_nh {
2807         struct rt6_info *rt6_info;
2808         struct fib6_config r_cfg;
2809         struct mx6_config mxc;
2810         struct list_head next;
2811 };
2812
2813 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
2814 {
2815         struct rt6_nh *nh;
2816
2817         list_for_each_entry(nh, rt6_nh_list, next) {
2818                 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
2819                         &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
2820                         nh->r_cfg.fc_ifindex);
2821         }
2822 }
2823
2824 static int ip6_route_info_append(struct list_head *rt6_nh_list,
2825                                  struct rt6_info *rt, struct fib6_config *r_cfg)
2826 {
2827         struct rt6_nh *nh;
2828         struct rt6_info *rtnh;
2829         int err = -EEXIST;
2830
2831         list_for_each_entry(nh, rt6_nh_list, next) {
2832                 /* check if rt6_info already exists */
2833                 rtnh = nh->rt6_info;
2834
2835                 if (rtnh->dst.dev == rt->dst.dev &&
2836                     rtnh->rt6i_idev == rt->rt6i_idev &&
2837                     ipv6_addr_equal(&rtnh->rt6i_gateway,
2838                                     &rt->rt6i_gateway))
2839                         return err;
2840         }
2841
2842         nh = kzalloc(sizeof(*nh), GFP_KERNEL);
2843         if (!nh)
2844                 return -ENOMEM;
2845         nh->rt6_info = rt;
2846         err = ip6_convert_metrics(&nh->mxc, r_cfg);
2847         if (err) {
2848                 kfree(nh);
2849                 return err;
2850         }
2851         memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
2852         list_add_tail(&nh->next, rt6_nh_list);
2853
2854         return 0;
2855 }
2856
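/* ip6_route_multipath_add - add an RTA_MULTIPATH route.  Each rtnexthop is
 * expanded into its own rt6_info and inserted in turn; if any insertion
 * fails, the nexthops that were already added are deleted again, so the
 * operation is effectively all-or-nothing.
 */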
2857 static int ip6_route_multipath_add(struct fib6_config *cfg)
2858 {
2859         struct fib6_config r_cfg;
2860         struct rtnexthop *rtnh;
2861         struct rt6_info *rt;
2862         struct rt6_nh *err_nh;
2863         struct rt6_nh *nh, *nh_safe;
2864         int remaining;
2865         int attrlen;
2866         int err = 1;
2867         int nhn = 0;
2868         int replace = (cfg->fc_nlinfo.nlh &&
2869                        (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
2870         LIST_HEAD(rt6_nh_list);
2871
2872         remaining = cfg->fc_mp_len;
2873         rtnh = (struct rtnexthop *)cfg->fc_mp;
2874
2875         /* Parse a Multipath Entry and build a list (rt6_nh_list) of
2876          * rt6_info structs per nexthop
2877          */
2878         while (rtnh_ok(rtnh, remaining)) {
2879                 memcpy(&r_cfg, cfg, sizeof(*cfg));
2880                 if (rtnh->rtnh_ifindex)
2881                         r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2882
2883                 attrlen = rtnh_attrlen(rtnh);
2884                 if (attrlen > 0) {
2885                         struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2886
2887                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2888                         if (nla) {
2889                                 r_cfg.fc_gateway = nla_get_in6_addr(nla);
2890                                 r_cfg.fc_flags |= RTF_GATEWAY;
2891                         }
2892                         r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
2893                         nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
2894                         if (nla)
2895                                 r_cfg.fc_encap_type = nla_get_u16(nla);
2896                 }
2897
2898                 rt = ip6_route_info_create(&r_cfg);
2899                 if (IS_ERR(rt)) {
2900                         err = PTR_ERR(rt);
2901                         rt = NULL;
2902                         goto cleanup;
2903                 }
2904
2905                 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
2906                 if (err) {
2907                         dst_free(&rt->dst);
2908                         goto cleanup;
2909                 }
2910
2911                 rtnh = rtnh_next(rtnh, &remaining);
2912         }
2913
2914         err_nh = NULL;
2915         list_for_each_entry(nh, &rt6_nh_list, next) {
2916                 err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
2917                 /* nh->rt6_info is used or freed at this point, reset to NULL */
2918                 nh->rt6_info = NULL;
2919                 if (err) {
2920                         if (replace && nhn)
2921                                 ip6_print_replace_route_err(&rt6_nh_list);
2922                         err_nh = nh;
2923                         goto add_errout;
2924                 }
2925
2926                 /* Because each route is added as if it were a single route, we
2927                  * remove these flags after the first nexthop: if there is a
2928                  * collision, we have already failed to add the first nexthop
2929                  * (fib6_add_rt2node() has rejected it); when replacing, the old
2930                  * nexthops have been replaced by the first new one, and the
2931                  * remaining nexthops should be appended to it.
2932                  */
2933                 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
2934                                                      NLM_F_REPLACE);
2935                 nhn++;
2936         }
2937
2938         goto cleanup;
2939
2940 add_errout:
2941         /* Delete routes that were already added */
2942         list_for_each_entry(nh, &rt6_nh_list, next) {
2943                 if (err_nh == nh)
2944                         break;
2945                 ip6_route_del(&nh->r_cfg);
2946         }
2947
2948 cleanup:
2949         list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
2950                 if (nh->rt6_info)
2951                         dst_free(&nh->rt6_info->dst);
2952                 kfree(nh->mxc.mx);
2953                 list_del(&nh->next);
2954                 kfree(nh);
2955         }
2956
2957         return err;
2958 }
2959
2960 static int ip6_route_multipath_del(struct fib6_config *cfg)
2961 {
2962         struct fib6_config r_cfg;
2963         struct rtnexthop *rtnh;
2964         int remaining;
2965         int attrlen;
2966         int err = 1, last_err = 0;
2967
2968         remaining = cfg->fc_mp_len;
2969         rtnh = (struct rtnexthop *)cfg->fc_mp;
2970
2971         /* Parse a Multipath Entry */
2972         while (rtnh_ok(rtnh, remaining)) {
2973                 memcpy(&r_cfg, cfg, sizeof(*cfg));
2974                 if (rtnh->rtnh_ifindex)
2975                         r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2976
2977                 attrlen = rtnh_attrlen(rtnh);
2978                 if (attrlen > 0) {
2979                         struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2980
2981                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2982                         if (nla) {
2983                                 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2984                                 r_cfg.fc_flags |= RTF_GATEWAY;
2985                         }
2986                 }
2987                 err = ip6_route_del(&r_cfg);
2988                 if (err)
2989                         last_err = err;
2990
2991                 rtnh = rtnh_next(rtnh, &remaining);
2992         }
2993
2994         return last_err;
2995 }
2996
2997 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2998 {
2999         struct fib6_config cfg;
3000         int err;
3001
3002         err = rtm_to_fib6_config(skb, nlh, &cfg);
3003         if (err < 0)
3004                 return err;
3005
3006         if (cfg.fc_mp)
3007                 return ip6_route_multipath_del(&cfg);
3008         else
3009                 return ip6_route_del(&cfg);
3010 }
3011
3012 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
3013 {
3014         struct fib6_config cfg;
3015         int err;
3016
3017         err = rtm_to_fib6_config(skb, nlh, &cfg);
3018         if (err < 0)
3019                 return err;
3020
3021         if (cfg.fc_mp)
3022                 return ip6_route_multipath_add(&cfg);
3023         else
3024                 return ip6_route_add(&cfg);
3025 }
3026
3027 static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
3028 {
3029         return NLMSG_ALIGN(sizeof(struct rtmsg))
3030                + nla_total_size(16) /* RTA_SRC */
3031                + nla_total_size(16) /* RTA_DST */
3032                + nla_total_size(16) /* RTA_GATEWAY */
3033                + nla_total_size(16) /* RTA_PREFSRC */
3034                + nla_total_size(4) /* RTA_TABLE */
3035                + nla_total_size(4) /* RTA_IIF */
3036                + nla_total_size(4) /* RTA_OIF */
3037                + nla_total_size(4) /* RTA_PRIORITY */
3038                + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
3039                + nla_total_size(sizeof(struct rta_cacheinfo))
3040                + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
3041                + nla_total_size(1) /* RTA_PREF */
3042                + lwtunnel_get_encap_size(rt->dst.lwtstate);
3043 }
3044
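/* rt6_fill_node - fill a netlink route message describing @rt.  Returns 1
 * (emitting nothing) when only prefix routes were requested and @rt is not
 * one, or a negative error such as -EMSGSIZE when the message does not fit.
 */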
3045 static int rt6_fill_node(struct net *net,
3046                          struct sk_buff *skb, struct rt6_info *rt,
3047                          struct in6_addr *dst, struct in6_addr *src,
3048                          int iif, int type, u32 portid, u32 seq,
3049                          int prefix, int nowait, unsigned int flags)
3050 {
3051         u32 metrics[RTAX_MAX];
3052         struct rtmsg *rtm;
3053         struct nlmsghdr *nlh;
3054         long expires;
3055         u32 table;
3056
3057         if (prefix) {   /* user wants prefix routes only */
3058                 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
3059                         /* success since this is not a prefix route */
3060                         return 1;
3061                 }
3062         }
3063
3064         nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
3065         if (!nlh)
3066                 return -EMSGSIZE;
3067
3068         rtm = nlmsg_data(nlh);
3069         rtm->rtm_family = AF_INET6;
3070         rtm->rtm_dst_len = rt->rt6i_dst.plen;
3071         rtm->rtm_src_len = rt->rt6i_src.plen;
3072         rtm->rtm_tos = 0;
3073         if (rt->rt6i_table)
3074                 table = rt->rt6i_table->tb6_id;
3075         else
3076                 table = RT6_TABLE_UNSPEC;
3077         rtm->rtm_table = table;
3078         if (nla_put_u32(skb, RTA_TABLE, table))
3079                 goto nla_put_failure;
3080         if (rt->rt6i_flags & RTF_REJECT) {
3081                 switch (rt->dst.error) {
3082                 case -EINVAL:
3083                         rtm->rtm_type = RTN_BLACKHOLE;
3084                         break;
3085                 case -EACCES:
3086                         rtm->rtm_type = RTN_PROHIBIT;
3087                         break;
3088                 case -EAGAIN:
3089                         rtm->rtm_type = RTN_THROW;
3090                         break;
3091                 default:
3092                         rtm->rtm_type = RTN_UNREACHABLE;
3093                         break;
3094                 }
3095         }
3096         else if (rt->rt6i_flags & RTF_LOCAL)
3097                 rtm->rtm_type = RTN_LOCAL;
3098         else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
3099                 rtm->rtm_type = RTN_LOCAL;
3100         else
3101                 rtm->rtm_type = RTN_UNICAST;
3102         rtm->rtm_flags = 0;
3103         if (!netif_carrier_ok(rt->dst.dev)) {
3104                 rtm->rtm_flags |= RTNH_F_LINKDOWN;
3105                 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
3106                         rtm->rtm_flags |= RTNH_F_DEAD;
3107         }
3108         rtm->rtm_scope = RT_SCOPE_UNIVERSE;
3109         rtm->rtm_protocol = rt->rt6i_protocol;
3110         if (rt->rt6i_flags & RTF_DYNAMIC)
3111                 rtm->rtm_protocol = RTPROT_REDIRECT;
3112         else if (rt->rt6i_flags & RTF_ADDRCONF) {
3113                 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
3114                         rtm->rtm_protocol = RTPROT_RA;
3115                 else
3116                         rtm->rtm_protocol = RTPROT_KERNEL;
3117         }
3118
3119         if (rt->rt6i_flags & RTF_CACHE)
3120                 rtm->rtm_flags |= RTM_F_CLONED;
3121
3122         if (dst) {
3123                 if (nla_put_in6_addr(skb, RTA_DST, dst))
3124                         goto nla_put_failure;
3125                 rtm->rtm_dst_len = 128;
3126         } else if (rtm->rtm_dst_len)
3127                 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
3128                         goto nla_put_failure;
3129 #ifdef CONFIG_IPV6_SUBTREES
3130         if (src) {
3131                 if (nla_put_in6_addr(skb, RTA_SRC, src))
3132                         goto nla_put_failure;
3133                 rtm->rtm_src_len = 128;
3134         } else if (rtm->rtm_src_len &&
3135                    nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
3136                 goto nla_put_failure;
3137 #endif
3138         if (iif) {
3139 #ifdef CONFIG_IPV6_MROUTE
3140                 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
3141                         int err = ip6mr_get_route(net, skb, rtm, nowait);
3142                         if (err <= 0) {
3143                                 if (!nowait) {
3144                                         if (err == 0)
3145                                                 return 0;
3146                                         goto nla_put_failure;
3147                                 } else {
3148                                         if (err == -EMSGSIZE)
3149                                                 goto nla_put_failure;
3150                                 }
3151                         }
3152                 } else
3153 #endif
3154                         if (nla_put_u32(skb, RTA_IIF, iif))
3155                                 goto nla_put_failure;
3156         } else if (dst) {
3157                 struct in6_addr saddr_buf;
3158                 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
3159                     nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3160                         goto nla_put_failure;
3161         }
3162
3163         if (rt->rt6i_prefsrc.plen) {
3164                 struct in6_addr saddr_buf;
3165                 saddr_buf = rt->rt6i_prefsrc.addr;
3166                 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3167                         goto nla_put_failure;
3168         }
3169
3170         memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3171         if (rt->rt6i_pmtu)
3172                 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
3173         if (rtnetlink_put_metrics(skb, metrics) < 0)
3174                 goto nla_put_failure;
3175
3176         if (rt->rt6i_flags & RTF_GATEWAY) {
3177                 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
3178                         goto nla_put_failure;
3179         }
3180
3181         if (rt->dst.dev &&
3182             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3183                 goto nla_put_failure;
3184         if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
3185                 goto nla_put_failure;
3186
3187         expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
3188
3189         if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
3190                 goto nla_put_failure;
3191
3192         if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3193                 goto nla_put_failure;
3194
3195         lwtunnel_fill_encap(skb, rt->dst.lwtstate);
3196
3197         nlmsg_end(skb, nlh);
3198         return 0;
3199
3200 nla_put_failure:
3201         nlmsg_cancel(skb, nlh);
3202         return -EMSGSIZE;
3203 }
3204
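/* Per-route callback used when the FIB is dumped over rtnetlink: emit
 * one RTM_NEWROUTE message into the dump skb, honouring the RTM_F_PREFIX
 * filter from the request.
 */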
3205 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
3206 {
3207         struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
3208         int prefix;
3209
3210         if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
3211                 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
3212                 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
3213         } else
3214                 prefix = 0;
3215
3216         return rt6_fill_node(arg->net,
3217                      arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
3218                      NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
3219                      prefix, 0, NLM_F_MULTI);
3220 }
3221
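/* RTM_GETROUTE handler: look up the route for the requested source and
 * destination (input path if RTA_IIF is given, output path otherwise)
 * and unicast the result back as an RTM_NEWROUTE message.
 */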
3222 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
3223 {
3224         struct net *net = sock_net(in_skb->sk);
3225         struct nlattr *tb[RTA_MAX+1];
3226         struct rt6_info *rt;
3227         struct sk_buff *skb;
3228         struct rtmsg *rtm;
3229         struct flowi6 fl6;
3230         int err, iif = 0, oif = 0;
3231
3232         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
3233         if (err < 0)
3234                 goto errout;
3235
3236         err = -EINVAL;
3237         memset(&fl6, 0, sizeof(fl6));
3238
3239         if (tb[RTA_SRC]) {
3240                 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
3241                         goto errout;
3242
3243                 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
3244         }
3245
3246         if (tb[RTA_DST]) {
3247                 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
3248                         goto errout;
3249
3250                 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
3251         }
3252
3253         if (tb[RTA_IIF])
3254                 iif = nla_get_u32(tb[RTA_IIF]);
3255
3256         if (tb[RTA_OIF])
3257                 oif = nla_get_u32(tb[RTA_OIF]);
3258
3259         if (tb[RTA_MARK])
3260                 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
3261
3262         if (iif) {
3263                 struct net_device *dev;
3264                 int flags = 0;
3265
3266                 dev = __dev_get_by_index(net, iif);
3267                 if (!dev) {
3268                         err = -ENODEV;
3269                         goto errout;
3270                 }
3271
3272                 fl6.flowi6_iif = iif;
3273
3274                 if (!ipv6_addr_any(&fl6.saddr))
3275                         flags |= RT6_LOOKUP_F_HAS_SADDR;
3276
3277                 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
3278                                                                flags);
3279         } else {
3280                 fl6.flowi6_oif = oif;
3281
3282                 if (netif_index_is_l3_master(net, oif)) {
3283                         fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC |
3284                                            FLOWI_FLAG_SKIP_NH_OIF;
3285                 }
3286
3287                 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
3288         }
3289
3290         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3291         if (!skb) {
3292                 ip6_rt_put(rt);
3293                 err = -ENOBUFS;
3294                 goto errout;
3295         }
3296
3297         /* Reserve room for dummy headers; this skb can pass
3298          * through a good chunk of the routing engine.
3299          */
3300         skb_reset_mac_header(skb);
3301         skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
3302
3303         skb_dst_set(skb, &rt->dst);
3304
3305         err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
3306                             RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3307                             nlh->nlmsg_seq, 0, 0, 0);
3308         if (err < 0) {
3309                 kfree_skb(skb);
3310                 goto errout;
3311         }
3312
3313         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3314 errout:
3315         return err;
3316 }
3317
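/* Send an rtnetlink notification (@event, e.g. RTM_NEWROUTE or
 * RTM_DELROUTE) about @rt to RTNLGRP_IPV6_ROUTE listeners.
 */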
3318 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
3319                      unsigned int nlm_flags)
3320 {
3321         struct sk_buff *skb;
3322         struct net *net = info->nl_net;
3323         u32 seq;
3324         int err;
3325
3326         err = -ENOBUFS;
3327         seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3328
3329         skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3330         if (!skb)
3331                 goto errout;
3332
3333         err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3334                                 event, info->portid, seq, 0, 0, nlm_flags);
3335         if (err < 0) {
3336                 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3337                 WARN_ON(err == -EMSGSIZE);
3338                 kfree_skb(skb);
3339                 goto errout;
3340         }
3341         rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3342                     info->nlh, gfp_any());
3343         return;
3344 errout:
3345         if (err < 0)
3346                 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
3347 }
3348
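/* Netdevice notifier: when the loopback device registers, point this
 * netns' null (and, with multiple tables, prohibit/blackhole) template
 * routes at it.
 */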
3349 static int ip6_route_dev_notify(struct notifier_block *this,
3350                                 unsigned long event, void *ptr)
3351 {
3352         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3353         struct net *net = dev_net(dev);
3354
3355         if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
3356                 net->ipv6.ip6_null_entry->dst.dev = dev;
3357                 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3358 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3359                 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
3360                 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
3361                 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3362                 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3363 #endif
3364         }
3365
3366         return NOTIFY_OK;
3367 }
3368
3369 /*
3370  *      /proc
3371  */
3372
3373 #ifdef CONFIG_PROC_FS
3374
3375 static const struct file_operations ipv6_route_proc_fops = {
3376         .owner          = THIS_MODULE,
3377         .open           = ipv6_route_open,
3378         .read           = seq_read,
3379         .llseek         = seq_lseek,
3380         .release        = seq_release_net,
3381 };
3382
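/* Seq handler for /proc/net/rt6_stats: one line of hex-formatted fib6
 * statistics.
 */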
3383 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
3384 {
3385         struct net *net = (struct net *)seq->private;
3386         seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
3387                    net->ipv6.rt6_stats->fib_nodes,
3388                    net->ipv6.rt6_stats->fib_route_nodes,
3389                    net->ipv6.rt6_stats->fib_rt_alloc,
3390                    net->ipv6.rt6_stats->fib_rt_entries,
3391                    net->ipv6.rt6_stats->fib_rt_cache,
3392                    dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
3393                    net->ipv6.rt6_stats->fib_discarded_routes);
3394
3395         return 0;
3396 }
3397
3398 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
3399 {
3400         return single_open_net(inode, file, rt6_stats_seq_show);
3401 }
3402
3403 static const struct file_operations rt6_stats_seq_fops = {
3404         .owner   = THIS_MODULE,
3405         .open    = rt6_stats_seq_open,
3406         .read    = seq_read,
3407         .llseek  = seq_lseek,
3408         .release = single_release_net,
3409 };
3410 #endif  /* CONFIG_PROC_FS */
3411
3412 #ifdef CONFIG_SYSCTL
3413
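/* Write-only handler for the "flush" routing sysctl: writing a value
 * triggers an immediate fib6 garbage-collection run for this netns,
 * e.g. (assuming the usual procfs layout):
 *   echo 1 > /proc/sys/net/ipv6/route/flush
 */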
3414 static
3415 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3416                               void __user *buffer, size_t *lenp, loff_t *ppos)
3417 {
3418         struct net *net;
3419         int delay;
3420         if (!write)
3421                 return -EINVAL;
3422
3423         net = (struct net *)ctl->extra1;
3424         delay = net->ipv6.sysctl.flush_delay;
3425         proc_dointvec(ctl, write, buffer, lenp, ppos);
3426         fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
3427         return 0;
3428 }
3429
3430 struct ctl_table ipv6_route_table_template[] = {
3431         {
3432                 .procname       =       "flush",
3433                 .data           =       &init_net.ipv6.sysctl.flush_delay,
3434                 .maxlen         =       sizeof(int),
3435                 .mode           =       0200,
3436                 .proc_handler   =       ipv6_sysctl_rtcache_flush
3437         },
3438         {
3439                 .procname       =       "gc_thresh",
3440                 .data           =       &ip6_dst_ops_template.gc_thresh,
3441                 .maxlen         =       sizeof(int),
3442                 .mode           =       0644,
3443                 .proc_handler   =       proc_dointvec,
3444         },
3445         {
3446                 .procname       =       "max_size",
3447                 .data           =       &init_net.ipv6.sysctl.ip6_rt_max_size,
3448                 .maxlen         =       sizeof(int),
3449                 .mode           =       0644,
3450                 .proc_handler   =       proc_dointvec,
3451         },
3452         {
3453                 .procname       =       "gc_min_interval",
3454                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3455                 .maxlen         =       sizeof(int),
3456                 .mode           =       0644,
3457                 .proc_handler   =       proc_dointvec_jiffies,
3458         },
3459         {
3460                 .procname       =       "gc_timeout",
3461                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3462                 .maxlen         =       sizeof(int),
3463                 .mode           =       0644,
3464                 .proc_handler   =       proc_dointvec_jiffies,
3465         },
3466         {
3467                 .procname       =       "gc_interval",
3468                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_interval,
3469                 .maxlen         =       sizeof(int),
3470                 .mode           =       0644,
3471                 .proc_handler   =       proc_dointvec_jiffies,
3472         },
3473         {
3474                 .procname       =       "gc_elasticity",
3475                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3476                 .maxlen         =       sizeof(int),
3477                 .mode           =       0644,
3478                 .proc_handler   =       proc_dointvec,
3479         },
3480         {
3481                 .procname       =       "mtu_expires",
3482                 .data           =       &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3483                 .maxlen         =       sizeof(int),
3484                 .mode           =       0644,
3485                 .proc_handler   =       proc_dointvec_jiffies,
3486         },
3487         {
3488                 .procname       =       "min_adv_mss",
3489                 .data           =       &init_net.ipv6.sysctl.ip6_rt_min_advmss,
3490                 .maxlen         =       sizeof(int),
3491                 .mode           =       0644,
3492                 .proc_handler   =       proc_dointvec,
3493         },
3494         {
3495                 .procname       =       "gc_min_interval_ms",
3496                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3497                 .maxlen         =       sizeof(int),
3498                 .mode           =       0644,
3499                 .proc_handler   =       proc_dointvec_ms_jiffies,
3500         },
3501         { }
3502 };
3503
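/* Clone the sysctl template for a new netns and rewrite the .data
 * pointers to that netns' own variables.
 */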
3504 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3505 {
3506         struct ctl_table *table;
3507
3508         table = kmemdup(ipv6_route_table_template,
3509                         sizeof(ipv6_route_table_template),
3510                         GFP_KERNEL);
3511
3512         if (table) {
3513                 table[0].data = &net->ipv6.sysctl.flush_delay;
3514                 table[0].extra1 = net;
3515                 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3516                 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3517                 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3518                 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3519                 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3520                 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3521                 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3522                 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3523                 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3524
3525                 /* Don't export sysctls to unprivileged users */
3526                 if (net->user_ns != &init_user_ns)
3527                         table[0].procname = NULL;
3528         }
3529
3530         return table;
3531 }
3532 #endif
3533
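/* Per-netns init: set up dst_ops, allocate the null (and, with multiple
 * tables, prohibit/blackhole) template routes and seed the routing
 * sysctl defaults.
 */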
3534 static int __net_init ip6_route_net_init(struct net *net)
3535 {
3536         int ret = -ENOMEM;
3537
3538         memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3539                sizeof(net->ipv6.ip6_dst_ops));
3540
3541         if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3542                 goto out_ip6_dst_ops;
3543
3544         net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3545                                            sizeof(*net->ipv6.ip6_null_entry),
3546                                            GFP_KERNEL);
3547         if (!net->ipv6.ip6_null_entry)
3548                 goto out_ip6_dst_entries;
3549         net->ipv6.ip6_null_entry->dst.path =
3550                 (struct dst_entry *)net->ipv6.ip6_null_entry;
3551         net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3552         dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3553                          ip6_template_metrics, true);
3554
3555 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3556         net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3557                                                sizeof(*net->ipv6.ip6_prohibit_entry),
3558                                                GFP_KERNEL);
3559         if (!net->ipv6.ip6_prohibit_entry)
3560                 goto out_ip6_null_entry;
3561         net->ipv6.ip6_prohibit_entry->dst.path =
3562                 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3563         net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3564         dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3565                          ip6_template_metrics, true);
3566
3567         net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3568                                                sizeof(*net->ipv6.ip6_blk_hole_entry),
3569                                                GFP_KERNEL);
3570         if (!net->ipv6.ip6_blk_hole_entry)
3571                 goto out_ip6_prohibit_entry;
3572         net->ipv6.ip6_blk_hole_entry->dst.path =
3573                 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3574         net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3575         dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3576                          ip6_template_metrics, true);
3577 #endif
3578
3579         net->ipv6.sysctl.flush_delay = 0;
3580         net->ipv6.sysctl.ip6_rt_max_size = 4096;
3581         net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3582         net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3583         net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3584         net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3585         net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3586         net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3587
3588         net->ipv6.ip6_rt_gc_expire = 30*HZ;
3589
3590         ret = 0;
3591 out:
3592         return ret;
3593
3594 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3595 out_ip6_prohibit_entry:
3596         kfree(net->ipv6.ip6_prohibit_entry);
3597 out_ip6_null_entry:
3598         kfree(net->ipv6.ip6_null_entry);
3599 #endif
3600 out_ip6_dst_entries:
3601         dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3602 out_ip6_dst_ops:
3603         goto out;
3604 }
3605
3606 static void __net_exit ip6_route_net_exit(struct net *net)
3607 {
3608         kfree(net->ipv6.ip6_null_entry);
3609 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3610         kfree(net->ipv6.ip6_prohibit_entry);
3611         kfree(net->ipv6.ip6_blk_hole_entry);
3612 #endif
3613         dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3614 }
3615
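/* Late per-netns init: register the /proc/net/ipv6_route and
 * /proc/net/rt6_stats entries.
 */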
3616 static int __net_init ip6_route_net_init_late(struct net *net)
3617 {
3618 #ifdef CONFIG_PROC_FS
3619         proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3620         proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3621 #endif
3622         return 0;
3623 }
3624
3625 static void __net_exit ip6_route_net_exit_late(struct net *net)
3626 {
3627 #ifdef CONFIG_PROC_FS
3628         remove_proc_entry("ipv6_route", net->proc_net);
3629         remove_proc_entry("rt6_stats", net->proc_net);
3630 #endif
3631 }
3632
3633 static struct pernet_operations ip6_route_net_ops = {
3634         .init = ip6_route_net_init,
3635         .exit = ip6_route_net_exit,
3636 };
3637
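/* Allocate and initialise the per-netns inet_peer base for IPv6. */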
3638 static int __net_init ipv6_inetpeer_init(struct net *net)
3639 {
3640         struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3641
3642         if (!bp)
3643                 return -ENOMEM;
3644         inet_peer_base_init(bp);
3645         net->ipv6.peers = bp;
3646         return 0;
3647 }
3648
3649 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3650 {
3651         struct inet_peer_base *bp = net->ipv6.peers;
3652
3653         net->ipv6.peers = NULL;
3654         inetpeer_invalidate_tree(bp);
3655         kfree(bp);
3656 }
3657
3658 static struct pernet_operations ipv6_inetpeer_ops = {
3659         .init   =       ipv6_inetpeer_init,
3660         .exit   =       ipv6_inetpeer_exit,
3661 };
3662
3663 static struct pernet_operations ip6_route_net_late_ops = {
3664         .init = ip6_route_net_init_late,
3665         .exit = ip6_route_net_exit_late,
3666 };
3667
3668 static struct notifier_block ip6_route_dev_notifier = {
3669         .notifier_call = ip6_route_dev_notify,
3670         .priority = 0,
3671 };
3672
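/* Subsystem init: create the dst kmem cache, register the per-netns
 * operations, bring up the fib6 core, xfrm6 and policy rules, hook up
 * the RTM_*ROUTE handlers and the netdevice notifier, and initialise
 * the per-cpu uncached route lists.
 */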
3673 int __init ip6_route_init(void)
3674 {
3675         int ret;
3676         int cpu;
3677
3678         ret = -ENOMEM;
3679         ip6_dst_ops_template.kmem_cachep =
3680                 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3681                                   SLAB_HWCACHE_ALIGN, NULL);
3682         if (!ip6_dst_ops_template.kmem_cachep)
3683                 goto out;
3684
3685         ret = dst_entries_init(&ip6_dst_blackhole_ops);
3686         if (ret)
3687                 goto out_kmem_cache;
3688
3689         ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3690         if (ret)
3691                 goto out_dst_entries;
3692
3693         ret = register_pernet_subsys(&ip6_route_net_ops);
3694         if (ret)
3695                 goto out_register_inetpeer;
3696
3697         ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3698
3699         /* The loopback device is registered before this code runs, so the
3700          * loopback reference in rt6_info is not taken automatically; do it
3701          * manually for init_net. */
3702         init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3703         init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3704 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3705         init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3706         init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3707         init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3708         init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3709 #endif
3710         ret = fib6_init();
3711         if (ret)
3712                 goto out_register_subsys;
3713
3714         ret = xfrm6_init();
3715         if (ret)
3716                 goto out_fib6_init;
3717
3718         ret = fib6_rules_init();
3719         if (ret)
3720                 goto xfrm6_init;
3721
3722         ret = register_pernet_subsys(&ip6_route_net_late_ops);
3723         if (ret)
3724                 goto fib6_rules_init;
3725
3726         ret = -ENOBUFS;
3727         if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3728             __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3729             __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3730                 goto out_register_late_subsys;
3731
3732         ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3733         if (ret)
3734                 goto out_register_late_subsys;
3735
3736         for_each_possible_cpu(cpu) {
3737                 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
3738
3739                 INIT_LIST_HEAD(&ul->head);
3740                 spin_lock_init(&ul->lock);
3741         }
3742
3743 out:
3744         return ret;
3745
3746 out_register_late_subsys:
3747         unregister_pernet_subsys(&ip6_route_net_late_ops);
3748 fib6_rules_init:
3749         fib6_rules_cleanup();
3750 xfrm6_init:
3751         xfrm6_fini();
3752 out_fib6_init:
3753         fib6_gc_cleanup();
3754 out_register_subsys:
3755         unregister_pernet_subsys(&ip6_route_net_ops);
3756 out_register_inetpeer:
3757         unregister_pernet_subsys(&ipv6_inetpeer_ops);
3758 out_dst_entries:
3759         dst_entries_destroy(&ip6_dst_blackhole_ops);
3760 out_kmem_cache:
3761         kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3762         goto out;
3763 }
3764
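/* Undo everything ip6_route_init() set up. */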
3765 void ip6_route_cleanup(void)
3766 {
3767         unregister_netdevice_notifier(&ip6_route_dev_notifier);
3768         unregister_pernet_subsys(&ip6_route_net_late_ops);
3769         fib6_rules_cleanup();
3770         xfrm6_fini();
3771         fib6_gc_cleanup();
3772         unregister_pernet_subsys(&ipv6_inetpeer_ops);
3773         unregister_pernet_subsys(&ip6_route_net_ops);
3774         dst_entries_destroy(&ip6_dst_blackhole_ops);
3775         kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3776 }