/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
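
/* Example (illustrative sketch, not part of the original file): the two
 * read-side patterns the comment above describes, for walking the
 * device list of a namespace:
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		pr_info("%s\n", dev->name);
 *	rcu_read_unlock();
 *
 * or, equivalently, as a pure reader:
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		pr_info("%s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */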
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static DEFINE_MUTEX(devnet_rename_mutex);
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
#endif
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/
/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles the packet
 *	is first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write; it will change the packet and
 *	subsequent readers will get a broken packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
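
/* Example (illustrative sketch, not part of the original file): a minimal
 * ETH_P_ALL tap registered through dev_add_pack(). The handler receives a
 * reference to the skb and must drop it when done; all names here are
 * hypothetical.
 *
 *	static int tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			   struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		pr_debug("saw %u bytes on %s\n", skb->len, dev->name);
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type tap_pt __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = tap_rcv,	-- .dev left NULL: tap every device
 *	};
 *
 *	dev_add_pack(&tap_pt);		-- e.g. from module init
 *	dev_remove_pack(&tap_pt);	-- from module exit; this one sleeps
 */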
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/*******************************************************************************

		Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
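
/* Example (illustrative; the values are arbitrary): the boot command line
 * parameter parsed above takes the form
 *
 *	netdev=<irq>,<io>,<mem_start>,<mem_end>,<name>
 *
 * e.g.
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * get_options() pulls the leading integers into ints[] (with the count in
 * ints[0]) and leaves the remainder of the string ("eth0") as the device
 * name passed to netdev_boot_setup_add().
 */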
/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: the packet
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
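
/* Example (illustrative sketch, not part of the original file): the
 * hold/put discipline for the refcounted lookup, usable from any
 * process context:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		pr_info("found %s, ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */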
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found, or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found, or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found, or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		mutex_lock(&devnet_rename_mutex);
		mutex_unlock(&devnet_rename_mutex);
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found, or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if no
 *	device is found, or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
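
/* Example (illustrative): "eth0" and "br-lan" pass the checks above, while
 * "", ".", "..", "a/b", "x:y", names containing whitespace, and names of
 * IFNAMSIZ (16) or more characters are all rejected.
 */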
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
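
/* Example (illustrative sketch, not part of the original file): a driver
 * requesting the usual ethN naming before registration:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 *	-- dev->name is now e.g. "eth2"; err holds the assigned unit number
 */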
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	mutex_lock(&devnet_rename_mutex);
	__raw_write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		goto outunlock;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0)
		goto outunlock;

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		err = ret;
		goto outunlock;
	}

	__raw_write_seqcount_end(&devnet_rename_seq);
	mutex_unlock(&devnet_rename_mutex);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			mutex_lock(&devnet_rename_mutex);
			__raw_write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;

outunlock:
	__raw_write_seqcount_end(&devnet_rename_seq);
	mutex_unlock(&devnet_rename_mutex);
	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
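
/* Example (illustrative sketch, not part of the original file): bringing
 * an interface up from kernel code; the caller must hold the RTNL
 * semaphore, as __dev_open() asserts:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */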
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
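
/* Example (illustrative sketch, not part of the original file; names are
 * hypothetical): watching for devices coming up. As described above,
 * registration replays NETDEV_REGISTER/NETDEV_UP for existing devices.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */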
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	while (deferred--)
		static_key_slow_dec(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	/* net_disable_timestamp() can be called from non process context */
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->priority = 0;
	skb->protocol = eth_type_trans(skb, dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
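
/* Example (illustrative sketch, not part of the original file): a virtual
 * device pair could loop frames from one side's start_xmit into the peer's
 * receive path. The peer lookup is hypothetical; dev_forward_skb() consumes
 * the skb on both the success and the drop path.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */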
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
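
/* Example (illustrative sketch, not part of the original file): pinning
 * transmit queue 0 of a device to CPUs 0 and 1, e.g. from driver setup
 * code:
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(0, mask);
 *		cpumask_set_cpu(1, mask);
 *		netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *	}
 */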
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
			netif_reset_xps_queues_gt(dev, txq);
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
	preempt_check_resched_rt();
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(txq->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_wake_subqueue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
2316 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2318 unsigned long flags;
2320 if (likely(atomic_read(&skb->users) == 1)) {
2322 atomic_set(&skb->users, 0);
2323 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2326 get_kfree_skb_cb(skb)->reason = reason;
2327 local_irq_save(flags);
2328 skb->next = __this_cpu_read(softnet_data.completion_queue);
2329 __this_cpu_write(softnet_data.completion_queue, skb);
2330 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2331 local_irq_restore(flags);
2332 preempt_check_resched_rt();
2334 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2336 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2338 if (in_irq() || irqs_disabled())
2339 __dev_kfree_skb_irq(skb, reason);
2343 EXPORT_SYMBOL(__dev_kfree_skb_any);
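/* Illustrative sketch: a TX completion handler that may run in hardirq or
 * process context can rely on the dev_consume_skb_any()/dev_kfree_skb_any()
 * wrappers around the helpers above, which defer the free to the NET_TX
 * softirq when needed. "example_tx_clean" is a hypothetical helper.
 */
static void example_tx_clean(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		dev_consume_skb_any(skb);	/* counted as consumed */
	else
		dev_kfree_skb_any(skb);		/* counted as a drop */
}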
2347 * netif_device_detach - mark device as removed
2348 * @dev: network device
2350 * Mark device as removed from system and therefore no longer available.
2352 void netif_device_detach(struct net_device *dev)
2354 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2355 netif_running(dev)) {
2356 netif_tx_stop_all_queues(dev);
2359 EXPORT_SYMBOL(netif_device_detach);
2362 * netif_device_attach - mark device as attached
2363 * @dev: network device
2365 * Mark device as attached to the system and restart its queues if needed.
2367 void netif_device_attach(struct net_device *dev)
2369 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2370 netif_running(dev)) {
2371 netif_tx_wake_all_queues(dev);
2372 __netdev_watchdog_up(dev);
2375 EXPORT_SYMBOL(netif_device_attach);
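/* Illustrative sketch: the detach/attach pair is commonly used around
 * suspend/resume so the stack stops handing packets to hardware that is
 * powered down. "example_suspend"/"example_resume" are hypothetical; the
 * device-specific power handling is only indicated by comments.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);
	/* ... power the hardware down here (device specific) ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... power the hardware back up here (device specific) ... */
	netif_device_attach(dev);
	return 0;
}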
2378 * Returns a Tx hash based on the given packet descriptor and a Tx queue
2379 * count to be used as a distribution range.
2381 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2382 unsigned int num_tx_queues)
2386 u16 qcount = num_tx_queues;
2388 if (skb_rx_queue_recorded(skb)) {
2389 hash = skb_get_rx_queue(skb);
2390 while (unlikely(hash >= num_tx_queues))
2391 hash -= num_tx_queues;
2396 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2397 qoffset = dev->tc_to_txq[tc].offset;
2398 qcount = dev->tc_to_txq[tc].count;
2401 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2403 EXPORT_SYMBOL(__skb_tx_hash);
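/* Worked example (illustrative): with num_tx_queues == 8 and a recorded RX
 * queue of 11, the subtraction loop above folds 11 down to TX queue 3, so
 * transmit stays on a queue related to the flow's receive queue. Without a
 * recorded queue, reciprocal_scale() maps the 32-bit flow hash uniformly into
 * [0, qcount) and, when traffic classes are configured, qoffset/qcount narrow
 * the result to the class's queue range.
 */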
2405 static void skb_warn_bad_offload(const struct sk_buff *skb)
2407 static const netdev_features_t null_features = 0;
2408 struct net_device *dev = skb->dev;
2409 const char *name = "";
2411 if (!net_ratelimit())
2415 if (dev->dev.parent)
2416 name = dev_driver_string(dev->dev.parent);
2418 name = netdev_name(dev);
2420 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2421 "gso_type=%d ip_summed=%d\n",
2422 name, dev ? &dev->features : &null_features,
2423 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2424 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2425 skb_shinfo(skb)->gso_type, skb->ip_summed);
2429 * Invalidate hardware checksum when packet is to be mangled, and
2430 * complete checksum manually on outgoing path.
2432 int skb_checksum_help(struct sk_buff *skb)
2435 int ret = 0, offset;
2437 if (skb->ip_summed == CHECKSUM_COMPLETE)
2438 goto out_set_summed;
2440 if (unlikely(skb_shinfo(skb)->gso_size)) {
2441 skb_warn_bad_offload(skb);
2445 /* Before computing a checksum, we should make sure no frag could
2446 * be modified by an external entity : checksum could be wrong.
2448 if (skb_has_shared_frag(skb)) {
2449 ret = __skb_linearize(skb);
2454 offset = skb_checksum_start_offset(skb);
2455 BUG_ON(offset >= skb_headlen(skb));
2456 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2458 offset += skb->csum_offset;
2459 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2461 if (skb_cloned(skb) &&
2462 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2463 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2468 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2470 skb->ip_summed = CHECKSUM_NONE;
2474 EXPORT_SYMBOL(skb_checksum_help);
2476 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2478 __be16 type = skb->protocol;
2480 /* Tunnel gso handlers can set protocol to ethernet. */
2481 if (type == htons(ETH_P_TEB)) {
2484 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2487 eth = (struct ethhdr *)skb_mac_header(skb);
2488 type = eth->h_proto;
2491 return __vlan_get_protocol(skb, type, depth);
2495 * skb_mac_gso_segment - mac layer segmentation handler.
2496 * @skb: buffer to segment
2497 * @features: features for the output path (see dev->features)
2499 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2500 netdev_features_t features)
2502 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2503 struct packet_offload *ptype;
2504 int vlan_depth = skb->mac_len;
2505 __be16 type = skb_network_protocol(skb, &vlan_depth);
2507 if (unlikely(!type))
2508 return ERR_PTR(-EINVAL);
2510 __skb_pull(skb, vlan_depth);
2513 list_for_each_entry_rcu(ptype, &offload_base, list) {
2514 if (ptype->type == type && ptype->callbacks.gso_segment) {
2515 segs = ptype->callbacks.gso_segment(skb, features);
2521 __skb_push(skb, skb->data - skb_mac_header(skb));
2525 EXPORT_SYMBOL(skb_mac_gso_segment);
2528 /* openvswitch calls this on rx path, so we need a different check.
2530 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2533 return skb->ip_summed != CHECKSUM_PARTIAL;
2535 return skb->ip_summed == CHECKSUM_NONE;
2539 * __skb_gso_segment - Perform segmentation on skb.
2540 * @skb: buffer to segment
2541 * @features: features for the output path (see dev->features)
2542 * @tx_path: whether it is called in TX path
2544 * This function segments the given skb and returns a list of segments.
2546 * It may return NULL if the skb requires no segmentation. This is
2547 * only possible when GSO is used for verifying header integrity.
2549 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2551 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2552 netdev_features_t features, bool tx_path)
2554 if (unlikely(skb_needs_check(skb, tx_path))) {
2557 skb_warn_bad_offload(skb);
2559 err = skb_cow_head(skb, 0);
2561 return ERR_PTR(err);
2564 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2565 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2567 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2568 SKB_GSO_CB(skb)->encap_level = 0;
2570 skb_reset_mac_header(skb);
2571 skb_reset_mac_len(skb);
2573 return skb_mac_gso_segment(skb, features);
2575 EXPORT_SYMBOL(__skb_gso_segment);
2577 /* Take action when hardware reception checksum errors are detected. */
2579 void netdev_rx_csum_fault(struct net_device *dev)
2581 if (net_ratelimit()) {
2582 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2586 EXPORT_SYMBOL(netdev_rx_csum_fault);
2589 /* Actually, we should eliminate this check as soon as we know that:
2590 * 1. IOMMU is present and can map all of the memory.
2591 * 2. No high memory really exists on this machine.
2594 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2596 #ifdef CONFIG_HIGHMEM
2598 if (!(dev->features & NETIF_F_HIGHDMA)) {
2599 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2600 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2601 if (PageHighMem(skb_frag_page(frag)))
2606 if (PCI_DMA_BUS_IS_PHYS) {
2607 struct device *pdev = dev->dev.parent;
2611 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2612 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2613 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2614 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2622 /* If MPLS offload request, verify we are testing hardware MPLS features
2623 * instead of standard features for the netdev.
2625 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2626 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2627 netdev_features_t features,
2630 if (eth_p_mpls(type))
2631 features &= skb->dev->mpls_features;
2636 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2637 netdev_features_t features,
2644 static netdev_features_t harmonize_features(struct sk_buff *skb,
2645 netdev_features_t features)
2650 type = skb_network_protocol(skb, &tmp);
2651 features = net_mpls_features(skb, features, type);
2653 if (skb->ip_summed != CHECKSUM_NONE &&
2654 !can_checksum_protocol(features, type)) {
2655 features &= ~NETIF_F_ALL_CSUM;
2657 if (illegal_highdma(skb->dev, skb))
2658 features &= ~NETIF_F_SG;
2663 netdev_features_t passthru_features_check(struct sk_buff *skb,
2664 struct net_device *dev,
2665 netdev_features_t features)
2669 EXPORT_SYMBOL(passthru_features_check);
2671 static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2672 struct net_device *dev,
2673 netdev_features_t features)
2675 return vlan_features_check(skb, features);
2678 netdev_features_t netif_skb_features(struct sk_buff *skb)
2680 struct net_device *dev = skb->dev;
2681 netdev_features_t features = dev->features;
2682 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2684 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2685 features &= ~NETIF_F_GSO_MASK;
2687 /* If encapsulation offload request, verify we are testing
2688 * hardware encapsulation features instead of standard
2689 * features for the netdev
2691 if (skb->encapsulation)
2692 features &= dev->hw_enc_features;
2694 if (skb_vlan_tagged(skb))
2695 features = netdev_intersect_features(features,
2696 dev->vlan_features |
2697 NETIF_F_HW_VLAN_CTAG_TX |
2698 NETIF_F_HW_VLAN_STAG_TX);
2700 if (dev->netdev_ops->ndo_features_check)
2701 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2704 features &= dflt_features_check(skb, dev, features);
2706 return harmonize_features(skb, features);
2708 EXPORT_SYMBOL(netif_skb_features);
2710 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2711 struct netdev_queue *txq, bool more)
2716 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2717 dev_queue_xmit_nit(skb, dev);
2720 trace_net_dev_start_xmit(skb, dev);
2721 rc = netdev_start_xmit(skb, dev, txq, more);
2722 trace_net_dev_xmit(skb, rc, dev, len);
2727 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2728 struct netdev_queue *txq, int *ret)
2730 struct sk_buff *skb = first;
2731 int rc = NETDEV_TX_OK;
2734 struct sk_buff *next = skb->next;
2737 rc = xmit_one(skb, dev, txq, next != NULL);
2738 if (unlikely(!dev_xmit_complete(rc))) {
2744 if (netif_xmit_stopped(txq) && skb) {
2745 rc = NETDEV_TX_BUSY;
2755 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2756 netdev_features_t features)
2758 if (skb_vlan_tag_present(skb) &&
2759 !vlan_hw_offload_capable(features, skb->vlan_proto))
2760 skb = __vlan_hwaccel_push_inside(skb);
2764 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2766 netdev_features_t features;
2771 features = netif_skb_features(skb);
2772 skb = validate_xmit_vlan(skb, features);
2776 if (netif_needs_gso(skb, features)) {
2777 struct sk_buff *segs;
2779 segs = skb_gso_segment(skb, features);
2787 if (skb_needs_linearize(skb, features) &&
2788 __skb_linearize(skb))
2791 /* If packet is not checksummed and device does not
2792 * support checksumming for this protocol, complete
2793 * checksumming here.
2795 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2796 if (skb->encapsulation)
2797 skb_set_inner_transport_header(skb,
2798 skb_checksum_start_offset(skb));
2800 skb_set_transport_header(skb,
2801 skb_checksum_start_offset(skb));
2802 if (!(features & NETIF_F_ALL_CSUM) &&
2803 skb_checksum_help(skb))
2816 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2818 struct sk_buff *next, *head = NULL, *tail;
2820 for (; skb != NULL; skb = next) {
2824 /* in case the skb won't be segmented, point it at itself */
2827 skb = validate_xmit_skb(skb, dev);
2835 /* If skb was segmented, skb->prev points to
2836 * the last segment. If not, it still contains skb.
2842 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
2844 static void qdisc_pkt_len_init(struct sk_buff *skb)
2846 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2848 qdisc_skb_cb(skb)->pkt_len = skb->len;
2850 /* To get a more precise estimate of bytes sent on the wire,
2851 * we add to pkt_len the header size of all segments
2853 if (shinfo->gso_size) {
2854 unsigned int hdr_len;
2855 u16 gso_segs = shinfo->gso_segs;
2857 /* mac layer + network layer */
2858 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2860 /* + transport layer */
2861 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2862 hdr_len += tcp_hdrlen(skb);
2864 hdr_len += sizeof(struct udphdr);
2866 if (shinfo->gso_type & SKB_GSO_DODGY)
2867 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2870 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
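/* Worked example (illustrative): a TSO skb with a 54 byte header
 * (14 ethernet + 20 IPv4 + 20 TCP), gso_size 1448 and 4344 bytes of payload
 * has skb->len = 4398 and gso_segs = 3. On the wire this becomes three frames
 * carrying 3 * 54 = 162 header bytes, so pkt_len is bumped by
 * (3 - 1) * 54 = 108 to 4506, matching the bytes actually sent (ignoring
 * link-level framing overhead).
 */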
2874 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2875 struct net_device *dev,
2876 struct netdev_queue *txq)
2878 spinlock_t *root_lock = qdisc_lock(q);
2882 qdisc_pkt_len_init(skb);
2883 qdisc_calculate_pkt_len(skb, q);
2885 * Heuristic to force contended enqueues to serialize on a
2886 * separate lock before trying to get qdisc main lock.
2887 * This permits __QDISC___STATE_RUNNING owner to get the lock more
2888 * often and dequeue packets faster.
2890 #ifdef CONFIG_PREEMPT_RT_FULL
2893 contended = qdisc_is_running(q);
2895 if (unlikely(contended))
2896 spin_lock(&q->busylock);
2898 spin_lock(root_lock);
2899 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2902 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2903 qdisc_run_begin(q)) {
2905 * This is a work-conserving queue; there are no old skbs
2906 * waiting to be sent out; and the qdisc is not running -
2907 * xmit the skb directly.
2910 qdisc_bstats_update(q, skb);
2912 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2913 if (unlikely(contended)) {
2914 spin_unlock(&q->busylock);
2921 rc = NET_XMIT_SUCCESS;
2923 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2924 if (qdisc_run_begin(q)) {
2925 if (unlikely(contended)) {
2926 spin_unlock(&q->busylock);
2932 spin_unlock(root_lock);
2933 if (unlikely(contended))
2934 spin_unlock(&q->busylock);
2938 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2939 static void skb_update_prio(struct sk_buff *skb)
2941 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2943 if (!skb->priority && skb->sk && map) {
2944 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2946 if (prioidx < map->priomap_len)
2947 skb->priority = map->priomap[prioidx];
2951 #define skb_update_prio(skb)
2954 #ifdef CONFIG_PREEMPT_RT_FULL
2956 static inline int xmit_rec_read(void)
2958 return current->xmit_recursion;
2961 static inline void xmit_rec_inc(void)
2963 current->xmit_recursion++;
2966 static inline void xmit_rec_dec(void)
2968 current->xmit_recursion--;
2973 DEFINE_PER_CPU(int, xmit_recursion);
2974 EXPORT_SYMBOL(xmit_recursion);
2976 static inline int xmit_rec_read(void)
2978 return __this_cpu_read(xmit_recursion);
2981 static inline void xmit_rec_inc(void)
2983 __this_cpu_inc(xmit_recursion);
2986 static inline void xmit_rec_dec(void)
2988 __this_cpu_dec(xmit_recursion);
2992 #define RECURSION_LIMIT 10
2995 * dev_loopback_xmit - loop back @skb
2996 * @net: network namespace this loopback is happening in
2997 * @sk: sk needed to be a netfilter okfn
2998 * @skb: buffer to transmit
3000 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3002 skb_reset_mac_header(skb);
3003 __skb_pull(skb, skb_network_offset(skb));
3004 skb->pkt_type = PACKET_LOOPBACK;
3005 skb->ip_summed = CHECKSUM_UNNECESSARY;
3006 WARN_ON(!skb_dst(skb));
3011 EXPORT_SYMBOL(dev_loopback_xmit);
3013 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3016 struct xps_dev_maps *dev_maps;
3017 struct xps_map *map;
3018 int queue_index = -1;
3021 dev_maps = rcu_dereference(dev->xps_maps);
3023 map = rcu_dereference(
3024 dev_maps->cpu_map[skb->sender_cpu - 1]);
3027 queue_index = map->queues[0];
3029 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3031 if (unlikely(queue_index >= dev->real_num_tx_queues))
3043 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3045 struct sock *sk = skb->sk;
3046 int queue_index = sk_tx_queue_get(sk);
3048 if (queue_index < 0 || skb->ooo_okay ||
3049 queue_index >= dev->real_num_tx_queues) {
3050 int new_index = get_xps_queue(dev, skb);
3052 new_index = skb_tx_hash(dev, skb);
3054 if (queue_index != new_index && sk &&
3056 rcu_access_pointer(sk->sk_dst_cache))
3057 sk_tx_queue_set(sk, new_index);
3059 queue_index = new_index;
3065 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3066 struct sk_buff *skb,
3069 int queue_index = 0;
3072 if (skb->sender_cpu == 0)
3073 skb->sender_cpu = raw_smp_processor_id() + 1;
3076 if (dev->real_num_tx_queues != 1) {
3077 const struct net_device_ops *ops = dev->netdev_ops;
3078 if (ops->ndo_select_queue)
3079 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3082 queue_index = __netdev_pick_tx(dev, skb);
3085 queue_index = netdev_cap_txqueue(dev, queue_index);
3088 skb_set_queue_mapping(skb, queue_index);
3089 return netdev_get_tx_queue(dev, queue_index);
3093 * __dev_queue_xmit - transmit a buffer
3094 * @skb: buffer to transmit
3095 * @accel_priv: private data used for L2 forwarding offload
3097 * Queue a buffer for transmission to a network device. The caller must
3098 * have set the device and priority and built the buffer before calling
3099 * this function. The function can be called from an interrupt.
3101 * A negative errno code is returned on a failure. A success does not
3102 * guarantee the frame will be transmitted as it may be dropped due
3103 * to congestion or traffic shaping.
3105 * -----------------------------------------------------------------------------------
3106 * I notice this method can also return errors from the queue disciplines,
3107 * including NET_XMIT_DROP, which is a positive value. So, errors can also be positive.
3110 * Regardless of the return value, the skb is consumed, so it is currently
3111 * difficult to retry a send to this method. (You can bump the ref count
3112 * before sending to hold a reference for retry if you are careful.)
3114 * When calling this method, interrupts MUST be enabled. This is because
3115 * the BH enable code must have IRQs enabled so that it will not deadlock.
3118 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3120 struct net_device *dev = skb->dev;
3121 struct netdev_queue *txq;
3125 skb_reset_mac_header(skb);
3127 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3128 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3130 /* Disable soft irqs for various locks below. Also
3131 * stops preemption for RCU.
3135 skb_update_prio(skb);
3137 /* If device/qdisc don't need skb->dst, release it right now while
3138 * it's hot in this cpu cache.
3140 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3145 #ifdef CONFIG_NET_SWITCHDEV
3146 /* Don't forward if offload device already forwarded */
3147 if (skb->offload_fwd_mark &&
3148 skb->offload_fwd_mark == dev->offload_fwd_mark) {
3150 rc = NET_XMIT_SUCCESS;
3155 txq = netdev_pick_tx(dev, skb, accel_priv);
3156 q = rcu_dereference_bh(txq->qdisc);
3158 #ifdef CONFIG_NET_CLS_ACT
3159 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
3161 trace_net_dev_queue(skb);
3163 rc = __dev_xmit_skb(skb, q, dev, txq);
3167 /* The device has no queue. Common case for software devices:
3168 loopback, all the sorts of tunnels...
3170 Really, it is unlikely that netif_tx_lock protection is necessary
3171 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
3173 However, it is possible that they rely on protection
3176 Check this and take the lock. It is not prone to deadlocks.
3177 Either way, the noqueue qdisc case is even simpler 8)
3179 if (dev->flags & IFF_UP) {
3180 int cpu = smp_processor_id(); /* ok because BHs are off */
3182 if (txq->xmit_lock_owner != cpu) {
3184 if (xmit_rec_read() > RECURSION_LIMIT)
3185 goto recursion_alert;
3187 skb = validate_xmit_skb(skb, dev);
3191 HARD_TX_LOCK(dev, txq, cpu);
3193 if (!netif_xmit_stopped(txq)) {
3195 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3197 if (dev_xmit_complete(rc)) {
3198 HARD_TX_UNLOCK(dev, txq);
3202 HARD_TX_UNLOCK(dev, txq);
3203 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3206 /* Recursion is detected! It is possible,
3210 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3217 rcu_read_unlock_bh();
3219 atomic_long_inc(&dev->tx_dropped);
3220 kfree_skb_list(skb);
3223 rcu_read_unlock_bh();
3227 int dev_queue_xmit(struct sk_buff *skb)
3229 return __dev_queue_xmit(skb, NULL);
3231 EXPORT_SYMBOL(dev_queue_xmit);
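/* Illustrative sketch: per the kerneldoc above, dev_queue_xmit() consumes the
 * skb in every case and may return either a negative errno or a positive
 * qdisc code such as NET_XMIT_DROP, so a caller must not touch the skb
 * afterwards. "example_send" is a hypothetical wrapper; NET_XMIT_CN is
 * commonly treated as a soft success.
 */
static int example_send(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);

	/* skb must not be dereferenced from here on */
	if (rc < 0)
		return rc;		/* errno from the stack */
	if (rc == NET_XMIT_DROP)
		return -ENOBUFS;	/* the qdisc dropped the packet */
	return 0;			/* NET_XMIT_SUCCESS or NET_XMIT_CN */
}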
3233 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3235 return __dev_queue_xmit(skb, accel_priv);
3237 EXPORT_SYMBOL(dev_queue_xmit_accel);
3240 /*=======================================================================
			Receiver routines
3242 =======================================================================*/
3244 int netdev_max_backlog __read_mostly = 1000;
3245 EXPORT_SYMBOL(netdev_max_backlog);
3247 int netdev_tstamp_prequeue __read_mostly = 1;
3248 int netdev_budget __read_mostly = 300;
3249 int weight_p __read_mostly = 64; /* old backlog weight */
3251 /* Called with irq disabled */
3252 static inline void ____napi_schedule(struct softnet_data *sd,
3253 struct napi_struct *napi)
3255 list_add_tail(&napi->poll_list, &sd->poll_list);
3256 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3261 /* One global table that all flow-based protocols share. */
3262 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3263 EXPORT_SYMBOL(rps_sock_flow_table);
3264 u32 rps_cpu_mask __read_mostly;
3265 EXPORT_SYMBOL(rps_cpu_mask);
3267 struct static_key rps_needed __read_mostly;
3269 static struct rps_dev_flow *
3270 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3271 struct rps_dev_flow *rflow, u16 next_cpu)
3273 if (next_cpu < nr_cpu_ids) {
3274 #ifdef CONFIG_RFS_ACCEL
3275 struct netdev_rx_queue *rxqueue;
3276 struct rps_dev_flow_table *flow_table;
3277 struct rps_dev_flow *old_rflow;
3282 /* Should we steer this flow to a different hardware queue? */
3283 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3284 !(dev->features & NETIF_F_NTUPLE))
3286 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3287 if (rxq_index == skb_get_rx_queue(skb))
3290 rxqueue = dev->_rx + rxq_index;
3291 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3294 flow_id = skb_get_hash(skb) & flow_table->mask;
3295 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3296 rxq_index, flow_id);
3300 rflow = &flow_table->flows[flow_id];
3302 if (old_rflow->filter == rflow->filter)
3303 old_rflow->filter = RPS_NO_FILTER;
3307 per_cpu(softnet_data, next_cpu).input_queue_head;
3310 rflow->cpu = next_cpu;
3315 * get_rps_cpu is called from netif_receive_skb and returns the target
3316 * CPU from the RPS map of the receiving queue for a given skb.
3317 * rcu_read_lock must be held on entry.
3319 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3320 struct rps_dev_flow **rflowp)
3322 const struct rps_sock_flow_table *sock_flow_table;
3323 struct netdev_rx_queue *rxqueue = dev->_rx;
3324 struct rps_dev_flow_table *flow_table;
3325 struct rps_map *map;
3330 if (skb_rx_queue_recorded(skb)) {
3331 u16 index = skb_get_rx_queue(skb);
3333 if (unlikely(index >= dev->real_num_rx_queues)) {
3334 WARN_ONCE(dev->real_num_rx_queues > 1,
3335 "%s received packet on queue %u, but number "
3336 "of RX queues is %u\n",
3337 dev->name, index, dev->real_num_rx_queues);
3343 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3345 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3346 map = rcu_dereference(rxqueue->rps_map);
3347 if (!flow_table && !map)
3350 skb_reset_network_header(skb);
3351 hash = skb_get_hash(skb);
3355 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3356 if (flow_table && sock_flow_table) {
3357 struct rps_dev_flow *rflow;
3361 /* First check into global flow table if there is a match */
3362 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3363 if ((ident ^ hash) & ~rps_cpu_mask)
3366 next_cpu = ident & rps_cpu_mask;
3368 /* OK, now we know there is a match,
3369 * we can look at the local (per receive queue) flow table
3371 rflow = &flow_table->flows[hash & flow_table->mask];
3375 * If the desired CPU (where last recvmsg was done) is
3376 * different from current CPU (one in the rx-queue flow
3377 * table entry), switch if one of the following holds:
3378 * - Current CPU is unset (>= nr_cpu_ids).
3379 * - Current CPU is offline.
3380 * - The current CPU's queue tail has advanced beyond the
3381 * last packet that was enqueued using this table entry.
3382 * This guarantees that all previous packets for the flow
3383 * have been dequeued, thus preserving in order delivery.
3385 if (unlikely(tcpu != next_cpu) &&
3386 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3387 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3388 rflow->last_qtail)) >= 0)) {
3390 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3393 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3403 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3404 if (cpu_online(tcpu)) {
3414 #ifdef CONFIG_RFS_ACCEL
3417 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3418 * @dev: Device on which the filter was set
3419 * @rxq_index: RX queue index
3420 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3421 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3423 * Drivers that implement ndo_rx_flow_steer() should periodically call
3424 * this function for each installed filter and remove the filters for
3425 * which it returns %true.
3427 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3428 u32 flow_id, u16 filter_id)
3430 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3431 struct rps_dev_flow_table *flow_table;
3432 struct rps_dev_flow *rflow;
3437 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3438 if (flow_table && flow_id <= flow_table->mask) {
3439 rflow = &flow_table->flows[flow_id];
3440 cpu = ACCESS_ONCE(rflow->cpu);
3441 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3442 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3443 rflow->last_qtail) <
3444 (int)(10 * flow_table->mask)))
3450 EXPORT_SYMBOL(rps_may_expire_flow);
3452 #endif /* CONFIG_RFS_ACCEL */
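/* Illustrative sketch: a driver implementing ndo_rx_flow_steer() would
 * periodically walk its installed filters, as the kerneldoc above suggests,
 * and drop those the stack no longer needs. "struct example_filter" and the
 * filter table are hypothetical driver state; removing the actual hardware
 * rule is only indicated by a comment.
 */
#ifdef CONFIG_RFS_ACCEL
struct example_filter {
	bool installed;
	u16 rxq_index;
	u32 flow_id;
};

static void example_expire_rfs_filters(struct net_device *dev,
					struct example_filter *filters,
					unsigned int n_filters)
{
	unsigned int i;

	for (i = 0; i < n_filters; i++) {
		if (!filters[i].installed)
			continue;
		if (rps_may_expire_flow(dev, filters[i].rxq_index,
					filters[i].flow_id, i)) {
			/* hypothetical: remove the hardware steering rule here */
			filters[i].installed = false;
		}
	}
}
#endif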
3454 /* Called from hardirq (IPI) context */
3455 static void rps_trigger_softirq(void *data)
3457 struct softnet_data *sd = data;
3459 ____napi_schedule(sd, &sd->backlog);
3463 #endif /* CONFIG_RPS */
3466 * Check if this softnet_data structure belongs to another cpu.
3467 * If so, queue it on our IPI list and return 1
3470 static int rps_ipi_queued(struct softnet_data *sd)
3473 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3476 sd->rps_ipi_next = mysd->rps_ipi_list;
3477 mysd->rps_ipi_list = sd;
3479 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3482 #endif /* CONFIG_RPS */
3486 #ifdef CONFIG_NET_FLOW_LIMIT
3487 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3490 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3492 #ifdef CONFIG_NET_FLOW_LIMIT
3493 struct sd_flow_limit *fl;
3494 struct softnet_data *sd;
3495 unsigned int old_flow, new_flow;
3497 if (qlen < (netdev_max_backlog >> 1))
3500 sd = this_cpu_ptr(&softnet_data);
3503 fl = rcu_dereference(sd->flow_limit);
3505 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3506 old_flow = fl->history[fl->history_head];
3507 fl->history[fl->history_head] = new_flow;
3510 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3512 if (likely(fl->buckets[old_flow]))
3513 fl->buckets[old_flow]--;
3515 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3527 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3528 * queue (may be a remote CPU queue).
3530 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3531 unsigned int *qtail)
3533 struct softnet_data *sd;
3534 unsigned long flags;
3537 sd = &per_cpu(softnet_data, cpu);
3539 local_irq_save(flags);
3542 if (!netif_running(skb->dev))
3544 qlen = skb_queue_len(&sd->input_pkt_queue);
3545 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3548 __skb_queue_tail(&sd->input_pkt_queue, skb);
3549 input_queue_tail_incr_save(sd, qtail);
3551 local_irq_restore(flags);
3552 return NET_RX_SUCCESS;
3555 /* Schedule NAPI for backlog device
3556 * We can use non atomic operation since we own the queue lock
3558 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3559 if (!rps_ipi_queued(sd))
3560 ____napi_schedule(sd, &sd->backlog);
3569 local_irq_restore(flags);
3570 preempt_check_resched_rt();
3572 atomic_long_inc(&skb->dev->rx_dropped);
3577 static int netif_rx_internal(struct sk_buff *skb)
3581 net_timestamp_check(netdev_tstamp_prequeue, skb);
3583 trace_netif_rx(skb);
3585 if (static_key_false(&rps_needed)) {
3586 struct rps_dev_flow voidflow, *rflow = &voidflow;
3592 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3594 cpu = smp_processor_id();
3596 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3604 ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
3611 * netif_rx - post buffer to the network code
3612 * @skb: buffer to post
3614 * This function receives a packet from a device driver and queues it for
3615 * the upper (protocol) levels to process. It always succeeds. The buffer
3616 * may be dropped during processing for congestion control or by the protocol layers.
3620 * NET_RX_SUCCESS (no congestion)
3621 * NET_RX_DROP (packet was dropped)
3625 int netif_rx(struct sk_buff *skb)
3627 trace_netif_rx_entry(skb);
3629 return netif_rx_internal(skb);
3631 EXPORT_SYMBOL(netif_rx);
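/* Illustrative sketch: a non-NAPI driver's RX path typically builds the skb,
 * sets the protocol with eth_type_trans() and hands it off with netif_rx(),
 * which always consumes the skb. "example_rx_packet" is a hypothetical helper.
 */
static void example_rx_packet(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);	/* may return NET_RX_DROP; the skb is gone either way */
}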
3633 int netif_rx_ni(struct sk_buff *skb)
3637 trace_netif_rx_ni_entry(skb);
3640 err = netif_rx_internal(skb);
3645 EXPORT_SYMBOL(netif_rx_ni);
3647 #ifdef CONFIG_PREEMPT_RT_FULL
3649 * RT runs ksoftirqd as a real time thread and the root_lock is a
3650 * "sleeping spinlock". If the trylock fails then we can go into an
3651 * infinite loop when ksoftirqd preempted the task which actually
3652 * holds the lock, because we requeue q and raise NET_TX softirq
3653 * causing ksoftirqd to loop forever.
3655 * It's safe to use spin_lock on RT here as softirqs run in thread
3656 * context and cannot deadlock against the thread which is holding
3659 * On !RT the trylock might fail, but there we bail out from the
3660 * softirq loop after 10 attempts which we can't do on RT. And the
3661 * task holding root_lock cannot be preempted, so the only downside of
3662 * that trylock is that we need 10 loops to decide that we should have
3663 * given up in the first one :)
3665 static inline int take_root_lock(spinlock_t *lock)
3671 static inline int take_root_lock(spinlock_t *lock)
3673 return spin_trylock(lock);
3677 static void net_tx_action(struct softirq_action *h)
3679 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3681 if (sd->completion_queue) {
3682 struct sk_buff *clist;
3684 local_irq_disable();
3685 clist = sd->completion_queue;
3686 sd->completion_queue = NULL;
3690 struct sk_buff *skb = clist;
3691 clist = clist->next;
3693 WARN_ON(atomic_read(&skb->users));
3694 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3695 trace_consume_skb(skb);
3697 trace_kfree_skb(skb, net_tx_action);
3702 if (sd->output_queue) {
3705 local_irq_disable();
3706 head = sd->output_queue;
3707 sd->output_queue = NULL;
3708 sd->output_queue_tailp = &sd->output_queue;
3712 struct Qdisc *q = head;
3713 spinlock_t *root_lock;
3715 head = head->next_sched;
3717 root_lock = qdisc_lock(q);
3718 if (take_root_lock(root_lock)) {
3719 smp_mb__before_atomic();
3720 clear_bit(__QDISC_STATE_SCHED,
3723 spin_unlock(root_lock);
3725 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3727 __netif_reschedule(q);
3729 smp_mb__before_atomic();
3730 clear_bit(__QDISC_STATE_SCHED,
3738 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3739 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3740 /* This hook is defined here for ATM LANE */
3741 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3742 unsigned char *addr) __read_mostly;
3743 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3746 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3747 struct packet_type **pt_prev,
3748 int *ret, struct net_device *orig_dev)
3750 #ifdef CONFIG_NET_CLS_ACT
3751 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3752 struct tcf_result cl_res;
3754 /* If there's at least one ingress present somewhere (so
3755 * we get here via enabled static key), remaining devices
3756 * that are not configured with an ingress qdisc will bail
3762 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3766 qdisc_skb_cb(skb)->pkt_len = skb->len;
3767 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3768 qdisc_bstats_cpu_update(cl->q, skb);
3770 switch (tc_classify(skb, cl, &cl_res, false)) {
3772 case TC_ACT_RECLASSIFY:
3773 skb->tc_index = TC_H_MIN(cl_res.classid);
3776 qdisc_qstats_cpu_drop(cl->q);
3781 case TC_ACT_REDIRECT:
3782 /* skb_mac_header check was done by cls/act_bpf, so
3783 * we can safely push the L2 header back before
3784 * redirecting to another netdev
3786 __skb_push(skb, skb->mac_len);
3787 skb_do_redirect(skb);
3792 #endif /* CONFIG_NET_CLS_ACT */
3797 * netdev_is_rx_handler_busy - check if receive handler is registered
3798 * @dev: device to check
3800 * Check if a receive handler is already registered for a given device.
3801 * Return true if there is one.
3803 * The caller must hold the rtnl_mutex.
3805 bool netdev_is_rx_handler_busy(struct net_device *dev)
3808 return dev && rtnl_dereference(dev->rx_handler);
3810 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
3813 * netdev_rx_handler_register - register receive handler
3814 * @dev: device to register a handler for
3815 * @rx_handler: receive handler to register
3816 * @rx_handler_data: data pointer that is used by rx handler
3818 * Register a receive handler for a device. This handler will then be
3819 * called from __netif_receive_skb. A negative errno code is returned on a failure.
3822 * The caller must hold the rtnl_mutex.
3824 * For a general description of rx_handler, see enum rx_handler_result.
3826 int netdev_rx_handler_register(struct net_device *dev,
3827 rx_handler_func_t *rx_handler,
3828 void *rx_handler_data)
3832 if (dev->rx_handler)
3835 /* Note: rx_handler_data must be set before rx_handler */
3836 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3837 rcu_assign_pointer(dev->rx_handler, rx_handler);
3841 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
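/* Illustrative sketch of a minimal rx_handler in the style used by
 * bridge/bonding/macvlan: it must be registered and unregistered under the
 * RTNL lock and decides the fate of the skb via its return value.
 * "example_handle_frame"/"example_port_attach" and the opaque "port" pointer
 * are hypothetical.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port)
		return RX_HANDLER_PASS;

	/* ... inspect, consume or redirect the skb here; *pskb may be replaced ... */
	return RX_HANDLER_PASS;
}

static int example_port_attach(struct net_device *dev, void *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame, port);
	rtnl_unlock();
	return err;
}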
3844 * netdev_rx_handler_unregister - unregister receive handler
3845 * @dev: device to unregister a handler from
3847 * Unregister a receive handler from a device.
3849 * The caller must hold the rtnl_mutex.
3851 void netdev_rx_handler_unregister(struct net_device *dev)
3855 RCU_INIT_POINTER(dev->rx_handler, NULL);
3856 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3857 * section has a guarantee to see a non NULL rx_handler_data
3861 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3863 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3866 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3867 * the special handling of PFMEMALLOC skbs.
3869 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3871 switch (skb->protocol) {
3872 case htons(ETH_P_ARP):
3873 case htons(ETH_P_IP):
3874 case htons(ETH_P_IPV6):
3875 case htons(ETH_P_8021Q):
3876 case htons(ETH_P_8021AD):
3883 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3884 int *ret, struct net_device *orig_dev)
3886 #ifdef CONFIG_NETFILTER_INGRESS
3887 if (nf_hook_ingress_active(skb)) {
3889 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3893 return nf_hook_ingress(skb);
3895 #endif /* CONFIG_NETFILTER_INGRESS */
3899 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3901 struct packet_type *ptype, *pt_prev;
3902 rx_handler_func_t *rx_handler;
3903 struct net_device *orig_dev;
3904 bool deliver_exact = false;
3905 int ret = NET_RX_DROP;
3908 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3910 trace_netif_receive_skb(skb);
3912 orig_dev = skb->dev;
3914 skb_reset_network_header(skb);
3915 if (!skb_transport_header_was_set(skb))
3916 skb_reset_transport_header(skb);
3917 skb_reset_mac_len(skb);
3922 skb->skb_iif = skb->dev->ifindex;
3924 __this_cpu_inc(softnet_data.processed);
3926 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3927 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3928 skb = skb_vlan_untag(skb);
3933 #ifdef CONFIG_NET_CLS_ACT
3934 if (skb->tc_verd & TC_NCLS) {
3935 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3943 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3945 ret = deliver_skb(skb, pt_prev, orig_dev);
3949 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3951 ret = deliver_skb(skb, pt_prev, orig_dev);
3956 #ifdef CONFIG_NET_INGRESS
3957 if (static_key_false(&ingress_needed)) {
3958 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3962 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
3966 #ifdef CONFIG_NET_CLS_ACT
3970 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3973 if (skb_vlan_tag_present(skb)) {
3975 ret = deliver_skb(skb, pt_prev, orig_dev);
3978 if (vlan_do_receive(&skb))
3980 else if (unlikely(!skb))
3984 rx_handler = rcu_dereference(skb->dev->rx_handler);
3987 ret = deliver_skb(skb, pt_prev, orig_dev);
3990 switch (rx_handler(&skb)) {
3991 case RX_HANDLER_CONSUMED:
3992 ret = NET_RX_SUCCESS;
3994 case RX_HANDLER_ANOTHER:
3996 case RX_HANDLER_EXACT:
3997 deliver_exact = true;
3998 case RX_HANDLER_PASS:
4005 if (unlikely(skb_vlan_tag_present(skb))) {
4006 if (skb_vlan_tag_get_id(skb))
4007 skb->pkt_type = PACKET_OTHERHOST;
4008 /* Note: we might in the future use prio bits
4009 * and set skb->priority like in vlan_do_receive()
4010 * For the time being, just ignore Priority Code Point
4015 type = skb->protocol;
4017 /* deliver only exact match when indicated */
4018 if (likely(!deliver_exact)) {
4019 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4020 &ptype_base[ntohs(type) &
4024 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4025 &orig_dev->ptype_specific);
4027 if (unlikely(skb->dev != orig_dev)) {
4028 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4029 &skb->dev->ptype_specific);
4033 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
4036 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4039 atomic_long_inc(&skb->dev->rx_dropped);
4041 /* Jamal, now you will not be able to escape explaining
4042 * to me how you were going to use this. :-)
4051 static int __netif_receive_skb(struct sk_buff *skb)
4055 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4056 unsigned long pflags = current->flags;
4059 * PFMEMALLOC skbs are special, they should
4060 * - be delivered to SOCK_MEMALLOC sockets only
4061 * - stay away from userspace
4062 * - have bounded memory usage
4064 * Use PF_MEMALLOC as this saves us from propagating the allocation
4065 * context down to all allocation sites.
4067 current->flags |= PF_MEMALLOC;
4068 ret = __netif_receive_skb_core(skb, true);
4069 tsk_restore_flags(current, pflags, PF_MEMALLOC);
4071 ret = __netif_receive_skb_core(skb, false);
4076 static int netif_receive_skb_internal(struct sk_buff *skb)
4080 net_timestamp_check(netdev_tstamp_prequeue, skb);
4082 if (skb_defer_rx_timestamp(skb))
4083 return NET_RX_SUCCESS;
4088 if (static_key_false(&rps_needed)) {
4089 struct rps_dev_flow voidflow, *rflow = &voidflow;
4090 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
4093 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4099 ret = __netif_receive_skb(skb);
4105 * netif_receive_skb - process receive buffer from network
4106 * @skb: buffer to process
4108 * netif_receive_skb() is the main receive data processing function.
4109 * It always succeeds. The buffer may be dropped during processing
4110 * for congestion control or by the protocol layers.
4112 * This function may only be called from softirq context and interrupts
4113 * should be enabled.
4115 * Return values (usually ignored):
4116 * NET_RX_SUCCESS: no congestion
4117 * NET_RX_DROP: packet was dropped
4119 int netif_receive_skb(struct sk_buff *skb)
4121 trace_netif_receive_skb_entry(skb);
4123 return netif_receive_skb_internal(skb);
4125 EXPORT_SYMBOL(netif_receive_skb);
4127 /* Network device is going away, flush any packets still pending
4128 * Called with irqs disabled.
4130 static void flush_backlog(void *arg)
4132 struct net_device *dev = arg;
4133 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4134 struct sk_buff *skb, *tmp;
4137 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
4138 if (skb->dev == dev) {
4139 __skb_unlink(skb, &sd->input_pkt_queue);
4140 __skb_queue_tail(&sd->tofree_queue, skb);
4141 input_queue_head_incr(sd);
4146 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4147 if (skb->dev == dev) {
4148 __skb_unlink(skb, &sd->process_queue);
4149 __skb_queue_tail(&sd->tofree_queue, skb);
4150 input_queue_head_incr(sd);
4154 if (!skb_queue_empty(&sd->tofree_queue))
4155 raise_softirq_irqoff(NET_RX_SOFTIRQ);
4158 static int napi_gro_complete(struct sk_buff *skb)
4160 struct packet_offload *ptype;
4161 __be16 type = skb->protocol;
4162 struct list_head *head = &offload_base;
4165 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4167 if (NAPI_GRO_CB(skb)->count == 1) {
4168 skb_shinfo(skb)->gso_size = 0;
4173 list_for_each_entry_rcu(ptype, head, list) {
4174 if (ptype->type != type || !ptype->callbacks.gro_complete)
4177 err = ptype->callbacks.gro_complete(skb, 0);
4183 WARN_ON(&ptype->list == head);
4185 return NET_RX_SUCCESS;
4189 return netif_receive_skb_internal(skb);
4192 /* napi->gro_list contains packets ordered by age.
4193 * youngest packets at the head of it.
4194 * Complete skbs in reverse order to reduce latencies.
4196 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
4198 struct sk_buff *skb, *prev = NULL;
4200 /* scan list and build reverse chain */
4201 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4206 for (skb = prev; skb; skb = prev) {
4209 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4213 napi_gro_complete(skb);
4217 napi->gro_list = NULL;
4219 EXPORT_SYMBOL(napi_gro_flush);
4221 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4224 unsigned int maclen = skb->dev->hard_header_len;
4225 u32 hash = skb_get_hash_raw(skb);
4227 for (p = napi->gro_list; p; p = p->next) {
4228 unsigned long diffs;
4230 NAPI_GRO_CB(p)->flush = 0;
4232 if (hash != skb_get_hash_raw(p)) {
4233 NAPI_GRO_CB(p)->same_flow = 0;
4237 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4238 diffs |= p->vlan_tci ^ skb->vlan_tci;
4239 diffs |= skb_metadata_dst_cmp(p, skb);
4240 if (maclen == ETH_HLEN)
4241 diffs |= compare_ether_header(skb_mac_header(p),
4242 skb_mac_header(skb));
4244 diffs = memcmp(skb_mac_header(p),
4245 skb_mac_header(skb),
4247 NAPI_GRO_CB(p)->same_flow = !diffs;
4251 static void skb_gro_reset_offset(struct sk_buff *skb)
4253 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4254 const skb_frag_t *frag0 = &pinfo->frags[0];
4256 NAPI_GRO_CB(skb)->data_offset = 0;
4257 NAPI_GRO_CB(skb)->frag0 = NULL;
4258 NAPI_GRO_CB(skb)->frag0_len = 0;
4260 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4262 !PageHighMem(skb_frag_page(frag0))) {
4263 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4264 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4265 skb_frag_size(frag0),
4266 skb->end - skb->tail);
4270 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4272 struct skb_shared_info *pinfo = skb_shinfo(skb);
4274 BUG_ON(skb->end - skb->tail < grow);
4276 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4278 skb->data_len -= grow;
4281 pinfo->frags[0].page_offset += grow;
4282 skb_frag_size_sub(&pinfo->frags[0], grow);
4284 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4285 skb_frag_unref(skb, 0);
4286 memmove(pinfo->frags, pinfo->frags + 1,
4287 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4291 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4293 struct sk_buff **pp = NULL;
4294 struct packet_offload *ptype;
4295 __be16 type = skb->protocol;
4296 struct list_head *head = &offload_base;
4298 enum gro_result ret;
4301 if (!(skb->dev->features & NETIF_F_GRO))
4304 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4307 gro_list_prepare(napi, skb);
4310 list_for_each_entry_rcu(ptype, head, list) {
4311 if (ptype->type != type || !ptype->callbacks.gro_receive)
4314 skb_set_network_header(skb, skb_gro_offset(skb));
4315 skb_reset_mac_len(skb);
4316 NAPI_GRO_CB(skb)->same_flow = 0;
4317 NAPI_GRO_CB(skb)->flush = 0;
4318 NAPI_GRO_CB(skb)->free = 0;
4319 NAPI_GRO_CB(skb)->encap_mark = 0;
4320 NAPI_GRO_CB(skb)->recursion_counter = 0;
4321 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4323 /* Setup for GRO checksum validation */
4324 switch (skb->ip_summed) {
4325 case CHECKSUM_COMPLETE:
4326 NAPI_GRO_CB(skb)->csum = skb->csum;
4327 NAPI_GRO_CB(skb)->csum_valid = 1;
4328 NAPI_GRO_CB(skb)->csum_cnt = 0;
4330 case CHECKSUM_UNNECESSARY:
4331 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4332 NAPI_GRO_CB(skb)->csum_valid = 0;
4335 NAPI_GRO_CB(skb)->csum_cnt = 0;
4336 NAPI_GRO_CB(skb)->csum_valid = 0;
4339 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4344 if (&ptype->list == head)
4347 same_flow = NAPI_GRO_CB(skb)->same_flow;
4348 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4351 struct sk_buff *nskb = *pp;
4355 napi_gro_complete(nskb);
4362 if (NAPI_GRO_CB(skb)->flush)
4365 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4366 struct sk_buff *nskb = napi->gro_list;
4368 /* locate the end of the list to select the 'oldest' flow */
4369 while (nskb->next) {
4375 napi_gro_complete(nskb);
4379 NAPI_GRO_CB(skb)->count = 1;
4380 NAPI_GRO_CB(skb)->age = jiffies;
4381 NAPI_GRO_CB(skb)->last = skb;
4382 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4383 skb->next = napi->gro_list;
4384 napi->gro_list = skb;
4388 grow = skb_gro_offset(skb) - skb_headlen(skb);
4390 gro_pull_from_frag0(skb, grow);
4399 struct packet_offload *gro_find_receive_by_type(__be16 type)
4401 struct list_head *offload_head = &offload_base;
4402 struct packet_offload *ptype;
4404 list_for_each_entry_rcu(ptype, offload_head, list) {
4405 if (ptype->type != type || !ptype->callbacks.gro_receive)
4411 EXPORT_SYMBOL(gro_find_receive_by_type);
4413 struct packet_offload *gro_find_complete_by_type(__be16 type)
4415 struct list_head *offload_head = &offload_base;
4416 struct packet_offload *ptype;
4418 list_for_each_entry_rcu(ptype, offload_head, list) {
4419 if (ptype->type != type || !ptype->callbacks.gro_complete)
4425 EXPORT_SYMBOL(gro_find_complete_by_type);
4427 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4431 if (netif_receive_skb_internal(skb))
4439 case GRO_MERGED_FREE:
4440 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
4442 kmem_cache_free(skbuff_head_cache, skb);
4456 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4458 trace_napi_gro_receive_entry(skb);
4460 skb_gro_reset_offset(skb);
4462 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4464 EXPORT_SYMBOL(napi_gro_receive);
4466 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4468 if (unlikely(skb->pfmemalloc)) {
4472 __skb_pull(skb, skb_headlen(skb));
4473 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4474 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4476 skb->dev = napi->dev;
4478 skb->encapsulation = 0;
4479 skb_shinfo(skb)->gso_type = 0;
4480 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4485 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4487 struct sk_buff *skb = napi->skb;
4490 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4495 EXPORT_SYMBOL(napi_get_frags);
4497 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4498 struct sk_buff *skb,
4504 __skb_push(skb, ETH_HLEN);
4505 skb->protocol = eth_type_trans(skb, skb->dev);
4506 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4511 case GRO_MERGED_FREE:
4512 napi_reuse_skb(napi, skb);
4522 /* Upper GRO stack assumes network header starts at gro_offset=0
4523 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4524 * We copy ethernet header into skb->data to have a common layout.
4526 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4528 struct sk_buff *skb = napi->skb;
4529 const struct ethhdr *eth;
4530 unsigned int hlen = sizeof(*eth);
4534 skb_reset_mac_header(skb);
4535 skb_gro_reset_offset(skb);
4537 eth = skb_gro_header_fast(skb, 0);
4538 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4539 eth = skb_gro_header_slow(skb, hlen, 0);
4540 if (unlikely(!eth)) {
4541 napi_reuse_skb(napi, skb);
4545 gro_pull_from_frag0(skb, hlen);
4546 NAPI_GRO_CB(skb)->frag0 += hlen;
4547 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4549 __skb_pull(skb, hlen);
4552 * This works because the only protocols we care about don't require
 * special handling.
4554 * We'll fix it up properly in napi_frags_finish()
4556 skb->protocol = eth->h_proto;
4561 gro_result_t napi_gro_frags(struct napi_struct *napi)
4563 struct sk_buff *skb = napi_frags_skb(napi);
4568 trace_napi_gro_frags_entry(skb);
4570 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4572 EXPORT_SYMBOL(napi_gro_frags);
4574 /* Compute the checksum from gro_offset and return the folded value
4575 * after adding in any pseudo checksum.
4577 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4582 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4584 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4585 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4587 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4588 !skb->csum_complete_sw)
4589 netdev_rx_csum_fault(skb->dev);
4592 NAPI_GRO_CB(skb)->csum = wsum;
4593 NAPI_GRO_CB(skb)->csum_valid = 1;
4597 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4600 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
4601 * Note: called with local irq disabled, but exits with local irq enabled.
4603 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4606 struct softnet_data *remsd = sd->rps_ipi_list;
4609 sd->rps_ipi_list = NULL;
4612 preempt_check_resched_rt();
4614 /* Send pending IPI's to kick RPS processing on remote cpus. */
4616 struct softnet_data *next = remsd->rps_ipi_next;
4618 if (cpu_online(remsd->cpu))
4619 smp_call_function_single_async(remsd->cpu,
4626 preempt_check_resched_rt();
4629 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4632 return sd->rps_ipi_list != NULL;
4638 static int process_backlog(struct napi_struct *napi, int quota)
4641 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4643 /* Check if we have pending IPIs; it's better to send them now
4644 * rather than waiting for net_rx_action() to end.
4646 if (sd_has_rps_ipi_waiting(sd)) {
4647 local_irq_disable();
4648 net_rps_action_and_irq_enable(sd);
4651 napi->weight = weight_p;
4652 local_irq_disable();
4654 struct sk_buff *skb;
4656 while ((skb = __skb_dequeue(&sd->process_queue))) {
4659 __netif_receive_skb(skb);
4661 local_irq_disable();
4662 input_queue_head_incr(sd);
4663 if (++work >= quota) {
4670 if (skb_queue_empty(&sd->input_pkt_queue)) {
4672 * Inline a custom version of __napi_complete().
4673 * Only the current cpu owns and manipulates this napi,
4674 * and NAPI_STATE_SCHED is the only possible flag set
4676 * We can use a plain write instead of clear_bit(),
4677 * and we don't need an smp_mb() memory barrier.
4685 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4686 &sd->process_queue);
4695 * __napi_schedule - schedule for receive
4696 * @n: entry to schedule
4698 * The entry's receive function will be scheduled to run.
4699 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4701 void __napi_schedule(struct napi_struct *n)
4703 unsigned long flags;
4705 local_irq_save(flags);
4706 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4707 local_irq_restore(flags);
4708 preempt_check_resched_rt();
4710 EXPORT_SYMBOL(__napi_schedule);
4712 #ifndef CONFIG_PREEMPT_RT_FULL
4714 * __napi_schedule_irqoff - schedule for receive
4715 * @n: entry to schedule
4717 * Variant of __napi_schedule() assuming hard irqs are masked
4719 void __napi_schedule_irqoff(struct napi_struct *n)
4721 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4723 EXPORT_SYMBOL(__napi_schedule_irqoff);
4726 void __napi_complete(struct napi_struct *n)
4728 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4730 list_del_init(&n->poll_list);
4731 smp_mb__before_atomic();
4732 clear_bit(NAPI_STATE_SCHED, &n->state);
4734 EXPORT_SYMBOL(__napi_complete);
4736 void napi_complete_done(struct napi_struct *n, int work_done)
4738 unsigned long flags;
4741 * don't let napi dequeue from the cpu poll list
4742 * just in case it's running on a different cpu
4744 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4748 unsigned long timeout = 0;
4751 timeout = n->dev->gro_flush_timeout;
4754 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4755 HRTIMER_MODE_REL_PINNED);
4757 napi_gro_flush(n, false);
4759 if (likely(list_empty(&n->poll_list))) {
4760 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4762 /* If n->poll_list is not empty, we need to mask irqs */
4763 local_irq_save(flags);
4765 local_irq_restore(flags);
4768 EXPORT_SYMBOL(napi_complete_done);
4770 /* must be called under rcu_read_lock(), as we don't take a reference */
4771 struct napi_struct *napi_by_id(unsigned int napi_id)
4773 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4774 struct napi_struct *napi;
4776 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4777 if (napi->napi_id == napi_id)
4782 EXPORT_SYMBOL_GPL(napi_by_id);
4784 void napi_hash_add(struct napi_struct *napi)
4786 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4788 spin_lock(&napi_hash_lock);
4790 /* 0 is not a valid id, and we also skip an id that is taken;
4791 * we expect both events to be extremely rare
4794 while (!napi->napi_id) {
4795 napi->napi_id = ++napi_gen_id;
4796 if (napi_by_id(napi->napi_id))
4800 hlist_add_head_rcu(&napi->napi_hash_node,
4801 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4803 spin_unlock(&napi_hash_lock);
4806 EXPORT_SYMBOL_GPL(napi_hash_add);
4808 /* Warning: the caller is responsible for making sure an rcu grace period
4809 * is respected before freeing the memory containing @napi
4811 void napi_hash_del(struct napi_struct *napi)
4813 spin_lock(&napi_hash_lock);
4815 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4816 hlist_del_rcu(&napi->napi_hash_node);
4818 spin_unlock(&napi_hash_lock);
4820 EXPORT_SYMBOL_GPL(napi_hash_del);
4822 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4824 struct napi_struct *napi;
4826 napi = container_of(timer, struct napi_struct, timer);
4828 napi_schedule(napi);
4830 return HRTIMER_NORESTART;
4833 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4834 int (*poll)(struct napi_struct *, int), int weight)
4836 INIT_LIST_HEAD(&napi->poll_list);
4837 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4838 napi->timer.function = napi_watchdog;
4839 napi->gro_count = 0;
4840 napi->gro_list = NULL;
4843 if (weight > NAPI_POLL_WEIGHT)
4844 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4846 napi->weight = weight;
4847 list_add(&napi->dev_list, &dev->napi_list);
4849 #ifdef CONFIG_NETPOLL
4850 spin_lock_init(&napi->poll_lock);
4851 napi->poll_owner = -1;
4853 set_bit(NAPI_STATE_SCHED, &napi->state);
4855 EXPORT_SYMBOL(netif_napi_add);
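/* Illustrative sketch of the usual driver pairing for the NAPI API above: the
 * poll callback processes at most @budget packets, feeds them to GRO, and
 * only re-arms its interrupt after napi_complete_done(); if it consumes the
 * whole budget it must leave the NAPI state alone, as described in
 * napi_poll() below. "example_priv" and the two rx helper prototypes are
 * hypothetical driver code.
 */
struct example_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

struct sk_buff *example_rx_next_skb(struct example_priv *priv);	/* hypothetical */
void example_enable_rx_irq(struct example_priv *priv);			/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = example_rx_next_skb(priv);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work++;
	}

	if (work < budget) {
		napi_complete_done(napi, work);
		example_enable_rx_irq(priv);
	}
	return work;
}

static void example_setup_napi(struct example_priv *priv)
{
	netif_napi_add(priv->dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
}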
4857 void napi_disable(struct napi_struct *n)
4860 set_bit(NAPI_STATE_DISABLE, &n->state);
4862 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4864 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
4867 hrtimer_cancel(&n->timer);
4869 clear_bit(NAPI_STATE_DISABLE, &n->state);
4871 EXPORT_SYMBOL(napi_disable);
4873 void netif_napi_del(struct napi_struct *napi)
4875 list_del_init(&napi->dev_list);
4876 napi_free_frags(napi);
4878 kfree_skb_list(napi->gro_list);
4879 napi->gro_list = NULL;
4880 napi->gro_count = 0;
4882 EXPORT_SYMBOL(netif_napi_del);
4884 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4889 list_del_init(&n->poll_list);
4891 have = netpoll_poll_lock(n);
4895 /* This NAPI_STATE_SCHED test is for avoiding a race
4896 * with netpoll's poll_napi(). Only the entity which
4897 * obtains the lock and sees NAPI_STATE_SCHED set will
4898 * actually make the ->poll() call. Therefore we avoid
4899 * accidentally calling ->poll() when NAPI is not scheduled.
4902 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4903 work = n->poll(n, weight);
4907 WARN_ON_ONCE(work > weight);
4909 if (likely(work < weight))
4912 /* Drivers must not modify the NAPI state if they
4913 * consume the entire weight. In such cases this code
4914 * still "owns" the NAPI instance and therefore can
4915 * move the instance around on the list at-will.
4917 if (unlikely(napi_disable_pending(n))) {
4923 /* flush too old packets
4924 * If HZ < 1000, flush all packets.
4926 napi_gro_flush(n, HZ >= 1000);
4929 /* Some drivers may have called napi_schedule
4930 * prior to exhausting their budget.
4932 if (unlikely(!list_empty(&n->poll_list))) {
4933 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4934 n->dev ? n->dev->name : "backlog");
4938 list_add_tail(&n->poll_list, repoll);
4941 netpoll_poll_unlock(have);
4946 static void net_rx_action(struct softirq_action *h)
4948 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4949 unsigned long time_limit = jiffies + 2;
4950 int budget = netdev_budget;
4951 struct sk_buff_head tofree_q;
4952 struct sk_buff *skb;
4956 __skb_queue_head_init(&tofree_q);
4958 local_irq_disable();
4959 skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
4960 list_splice_init(&sd->poll_list, &list);
4963 while ((skb = __skb_dequeue(&tofree_q)))
4967 struct napi_struct *n;
4969 if (list_empty(&list)) {
4970 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4975 n = list_first_entry(&list, struct napi_struct, poll_list);
4976 budget -= napi_poll(n, &repoll);
4978 /* If softirq window is exhausted then punt.
4979 * Allow this to run for 2 jiffies, which allows
4980 * an average latency of 1.5/HZ.
4982 if (unlikely(budget <= 0 ||
4983 time_after_eq(jiffies, time_limit))) {
4989 local_irq_disable();
4991 list_splice_tail_init(&sd->poll_list, &list);
4992 list_splice_tail(&repoll, &list);
4993 list_splice(&list, &sd->poll_list);
4994 if (!list_empty(&sd->poll_list))
4995 __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
4997 net_rps_action_and_irq_enable(sd);
5000 struct netdev_adjacent {
5001 struct net_device *dev;
5003 /* upper master flag, there can only be one master device per list */
5006 /* counter for the number of times this device was added to us */
5009 /* private field for the users */
5012 struct list_head list;
5013 struct rcu_head rcu;
5016 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
5017 struct list_head *adj_list)
5019 struct netdev_adjacent *adj;
5021 list_for_each_entry(adj, adj_list, list) {
5022 if (adj->dev == adj_dev)
5029 * netdev_has_upper_dev - Check if device is linked to an upper device
5031 * @upper_dev: upper device to check
5033 * Find out if a device is linked to the specified upper device and return true
5034 * if it is. Note that this checks only the immediate upper device,
5035 * not the complete stack of devices. The caller must hold the RTNL lock.
5037 bool netdev_has_upper_dev(struct net_device *dev,
5038 struct net_device *upper_dev)
5042 return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
5044 EXPORT_SYMBOL(netdev_has_upper_dev);
5047 * netdev_has_any_upper_dev - Check if device is linked to some device
5050 * Find out if a device is linked to an upper device and return true in case
5051 * it is. The caller must hold the RTNL lock.
5053 static bool netdev_has_any_upper_dev(struct net_device *dev)
5057 return !list_empty(&dev->all_adj_list.upper);
5061 * netdev_master_upper_dev_get - Get master upper device
5064 * Find a master upper device and return pointer to it or NULL in case
5065 * it's not there. The caller must hold the RTNL lock.
5067 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5069 struct netdev_adjacent *upper;
5073 if (list_empty(&dev->adj_list.upper))
5076 upper = list_first_entry(&dev->adj_list.upper,
5077 struct netdev_adjacent, list);
5078 if (likely(upper->master))
5082 EXPORT_SYMBOL(netdev_master_upper_dev_get);
5084 void *netdev_adjacent_get_private(struct list_head *adj_list)
5086 struct netdev_adjacent *adj;
5088 adj = list_entry(adj_list, struct netdev_adjacent, list);
5090 return adj->private;
5092 EXPORT_SYMBOL(netdev_adjacent_get_private);
5095 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5097 * @iter: list_head ** of the current position
5099 * Gets the next device from the dev's upper list, starting from iter
5100 * position. The caller must hold RCU read lock.
5102 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5103 struct list_head **iter)
5105 struct netdev_adjacent *upper;
5107 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5109 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5111 if (&upper->list == &dev->adj_list.upper)
5114 *iter = &upper->list;
5118 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
5121 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
5123 * @iter: list_head ** of the current position
5125 * Gets the next device from the dev's upper list, starting from iter
5126 * position. The caller must hold RCU read lock.
5128 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
5129 struct list_head **iter)
5131 struct netdev_adjacent *upper;
5133 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5135 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5137 if (&upper->list == &dev->all_adj_list.upper)
5140 *iter = &upper->list;
5144 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
5147 * netdev_lower_get_next_private - Get the next ->private from the
5148 * lower neighbour list
5150 * @iter: list_head ** of the current position
5152 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5153 * list, starting from iter position. The caller must either hold the
5154 * RTNL lock or its own locking that guarantees that the neighbour lower
5155 * list will remain unchanged.
5157 void *netdev_lower_get_next_private(struct net_device *dev,
5158 struct list_head **iter)
5160 struct netdev_adjacent *lower;
5162 lower = list_entry(*iter, struct netdev_adjacent, list);
5164 if (&lower->list == &dev->adj_list.lower)
5167 *iter = lower->list.next;
5169 return lower->private;
5171 EXPORT_SYMBOL(netdev_lower_get_next_private);
5174 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5175 * lower neighbour list, RCU
5178 * @iter: list_head ** of the current position
5180 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5181 * list, starting from iter position. The caller must hold RCU read lock.
5183 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5184 struct list_head **iter)
5186 struct netdev_adjacent *lower;
5188 WARN_ON_ONCE(!rcu_read_lock_held());
5190 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5192 if (&lower->list == &dev->adj_list.lower)
5195 *iter = &lower->list;
5197 return lower->private;
5199 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5202 * netdev_lower_get_next - Get the next device from the lower neighbour
5205 * @iter: list_head ** of the current position
5207 * Gets the next netdev_adjacent from the dev's lower neighbour
5208 * list, starting from iter position. The caller must hold RTNL lock or
5209 * its own locking that guarantees that the neighbour lower
5210 * list will remain unchanged.
5212 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5214 struct netdev_adjacent *lower;
5216 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5218 if (&lower->list == &dev->adj_list.lower)
5221 *iter = &lower->list;
5225 EXPORT_SYMBOL(netdev_lower_get_next);
5228 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5229 * lower neighbour list, RCU
5233 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5234 * list. The caller must hold RCU read lock.
5236 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5238 struct netdev_adjacent *lower;
5240 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5241 struct netdev_adjacent, list);
5243 return lower->private;
5246 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5249 * netdev_master_upper_dev_get_rcu - Get master upper device
5252 * Find a master upper device and return pointer to it or NULL in case
5253 * it's not there. The caller must hold the RCU read lock.
5255 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5257 struct netdev_adjacent *upper;
5259 upper = list_first_or_null_rcu(&dev->adj_list.upper,
5260 struct netdev_adjacent, list);
5261 if (upper && likely(upper->master))
5265 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5267 static int netdev_adjacent_sysfs_add(struct net_device *dev,
5268 struct net_device *adj_dev,
5269 struct list_head *dev_list)
5271 char linkname[IFNAMSIZ+7];
5272 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5273 "upper_%s" : "lower_%s", adj_dev->name);
5274 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5277 static void netdev_adjacent_sysfs_del(struct net_device *dev,
5279 struct list_head *dev_list)
5281 char linkname[IFNAMSIZ+7];
5282 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5283 "upper_%s" : "lower_%s", name);
5284 sysfs_remove_link(&(dev->dev.kobj), linkname);
5287 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5288 struct net_device *adj_dev,
5289 struct list_head *dev_list)
5291 return (dev_list == &dev->adj_list.upper ||
5292 dev_list == &dev->adj_list.lower) &&
5293 net_eq(dev_net(dev), dev_net(adj_dev));
5296 static int __netdev_adjacent_dev_insert(struct net_device *dev,
5297 struct net_device *adj_dev,
5299 struct list_head *dev_list,
5300 void *private, bool master)
5302 struct netdev_adjacent *adj;
5305 adj = __netdev_find_adj(adj_dev, dev_list);
5308 adj->ref_nr += ref_nr;
5312 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5317 adj->master = master;
5318 adj->ref_nr = ref_nr;
5319 adj->private = private;
5322 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5323 adj_dev->name, dev->name, adj_dev->name);
5325 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5326 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5331 /* Ensure that master link is always the first item in list. */
5333 ret = sysfs_create_link(&(dev->dev.kobj),
5334 &(adj_dev->dev.kobj), "master");
5336 goto remove_symlinks;
5338 list_add_rcu(&adj->list, dev_list);
5340 list_add_tail_rcu(&adj->list, dev_list);
5346 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5347 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5355 static void __netdev_adjacent_dev_remove(struct net_device *dev,
5356 struct net_device *adj_dev,
5358 struct list_head *dev_list)
5360 struct netdev_adjacent *adj;
5362 adj = __netdev_find_adj(adj_dev, dev_list);
5365 pr_err("tried to remove device %s from %s\n",
5366 dev->name, adj_dev->name);
5370 if (adj->ref_nr > ref_nr) {
5371 pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
5372 ref_nr, adj->ref_nr-ref_nr);
5373 adj->ref_nr -= ref_nr;
5378 sysfs_remove_link(&(dev->dev.kobj), "master");
5380 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5381 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5383 list_del_rcu(&adj->list);
5384 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5385 adj_dev->name, dev->name, adj_dev->name);
5387 kfree_rcu(adj, rcu);
5390 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5391 struct net_device *upper_dev,
5393 struct list_head *up_list,
5394 struct list_head *down_list,
5395 void *private, bool master)
5399 ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
5404 ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
5407 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
5414 static int __netdev_adjacent_dev_link(struct net_device *dev,
5415 struct net_device *upper_dev,
5418 return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
5419 &dev->all_adj_list.upper,
5420 &upper_dev->all_adj_list.lower,
5424 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5425 struct net_device *upper_dev,
5427 struct list_head *up_list,
5428 struct list_head *down_list)
5430 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
5431 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
5434 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5435 struct net_device *upper_dev,
5438 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
5439 &dev->all_adj_list.upper,
5440 &upper_dev->all_adj_list.lower);
5443 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5444 struct net_device *upper_dev,
5445 void *private, bool master)
5447 int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
5452 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
5453 &dev->adj_list.upper,
5454 &upper_dev->adj_list.lower,
5457 __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
5464 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5465 struct net_device *upper_dev)
5467 __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
5468 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
5469 &dev->adj_list.upper,
5470 &upper_dev->adj_list.lower);
5473 static int __netdev_upper_dev_link(struct net_device *dev,
5474 struct net_device *upper_dev, bool master,
5477 struct netdev_notifier_changeupper_info changeupper_info;
5478 struct netdev_adjacent *i, *j, *to_i, *to_j;
5483 if (dev == upper_dev)
5486 /* To prevent loops, check if dev is not upper device to upper_dev. */
5487 if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
5490 if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
5493 if (master && netdev_master_upper_dev_get(dev))
5496 changeupper_info.upper_dev = upper_dev;
5497 changeupper_info.master = master;
5498 changeupper_info.linking = true;
5500 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5501 &changeupper_info.info);
5502 ret = notifier_to_errno(ret);
5506 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5511 /* Now that we linked these devs, make all the upper_dev's
5512 * all_adj_list.upper visible to every dev's all_adj_list.lower and
5513 * vice versa, and don't forget the devices themselves. All of these
5514 * links are non-neighbours.
5516 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5517 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5518 pr_debug("Interlinking %s with %s, non-neighbour\n",
5519 i->dev->name, j->dev->name);
5520 ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
5526 /* add dev to every upper_dev's upper device */
5527 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5528 pr_debug("linking %s's upper device %s with %s\n",
5529 upper_dev->name, i->dev->name, dev->name);
5530 ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
5532 goto rollback_upper_mesh;
5535 /* add upper_dev to every dev's lower device */
5536 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5537 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5538 i->dev->name, upper_dev->name);
5539 ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
5541 goto rollback_lower_mesh;
5544 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5545 &changeupper_info.info);
5548 rollback_lower_mesh:
5550 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5553 __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
5558 rollback_upper_mesh:
5560 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5563 __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
5571 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5572 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5573 if (i == to_i && j == to_j)
5575 __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
5581 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5587 * netdev_upper_dev_link - Add a link to the upper device
5589 * @upper_dev: new upper device
5591 * Adds a link to device which is upper to this one. The caller must hold
5592 * the RTNL lock. On a failure a negative errno code is returned.
5593 * On success the reference counts are adjusted and the function returns zero.
5596 int netdev_upper_dev_link(struct net_device *dev,
5597 struct net_device *upper_dev)
5599 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5601 EXPORT_SYMBOL(netdev_upper_dev_link);
5604 * netdev_master_upper_dev_link - Add a master link to the upper device
5606 * @upper_dev: new upper device
5608 * Adds a link to device which is upper to this one. In this case, only
5609 * one master upper device can be linked, although other non-master devices
5610 * might be linked as well. The caller must hold the RTNL lock.
5611 * On a failure a negative errno code is returned. On success the reference
5612 * counts are adjusted and the function returns zero.
5614 int netdev_master_upper_dev_link(struct net_device *dev,
5615 struct net_device *upper_dev)
5617 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5619 EXPORT_SYMBOL(netdev_master_upper_dev_link);
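/* Illustrative sketch (not part of this file): a bonding-style driver
 * would typically enslave a device with
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *
 * and undo the relationship later with
 *
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 *
 * "slave_dev" and "bond_dev" are hypothetical names; RTNL must be held
 * around both calls.
 */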
5621 int netdev_master_upper_dev_link_private(struct net_device *dev,
5622 struct net_device *upper_dev,
5625 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5627 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5630 * netdev_upper_dev_unlink - Removes a link to upper device
5632 * @upper_dev: upper device to remove the link to
5634 * Removes a link to device which is upper to this one. The caller must hold
5637 void netdev_upper_dev_unlink(struct net_device *dev,
5638 struct net_device *upper_dev)
5640 struct netdev_notifier_changeupper_info changeupper_info;
5641 struct netdev_adjacent *i, *j;
5644 changeupper_info.upper_dev = upper_dev;
5645 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
5646 changeupper_info.linking = false;
5648 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5649 &changeupper_info.info);
5651 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5653 /* Here is the tricky part. We must remove all dev's lower
5654 * devices from all upper_dev's upper devices and vice
5655 * versa, to maintain the graph relationship.
5657 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5658 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5659 __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
5661 /* also remove the devices themselves from the lower/upper device
5664 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5665 __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
5667 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5668 __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
5670 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5671 &changeupper_info.info);
5673 EXPORT_SYMBOL(netdev_upper_dev_unlink);
5676 * netdev_bonding_info_change - Dispatch event about slave change
5678 * @bonding_info: info to dispatch
5680 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5681 * The caller must hold the RTNL lock.
5683 void netdev_bonding_info_change(struct net_device *dev,
5684 struct netdev_bonding_info *bonding_info)
5686 struct netdev_notifier_bonding_info info;
5688 memcpy(&info.bonding_info, bonding_info,
5689 sizeof(struct netdev_bonding_info));
5690 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5693 EXPORT_SYMBOL(netdev_bonding_info_change);
5695 static void netdev_adjacent_add_links(struct net_device *dev)
5697 struct netdev_adjacent *iter;
5699 struct net *net = dev_net(dev);
5701 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5702 if (!net_eq(net,dev_net(iter->dev)))
5704 netdev_adjacent_sysfs_add(iter->dev, dev,
5705 &iter->dev->adj_list.lower);
5706 netdev_adjacent_sysfs_add(dev, iter->dev,
5707 &dev->adj_list.upper);
5710 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5711 if (!net_eq(net,dev_net(iter->dev)))
5713 netdev_adjacent_sysfs_add(iter->dev, dev,
5714 &iter->dev->adj_list.upper);
5715 netdev_adjacent_sysfs_add(dev, iter->dev,
5716 &dev->adj_list.lower);
5720 static void netdev_adjacent_del_links(struct net_device *dev)
5722 struct netdev_adjacent *iter;
5724 struct net *net = dev_net(dev);
5726 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5727 if (!net_eq(net,dev_net(iter->dev)))
5729 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5730 &iter->dev->adj_list.lower);
5731 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5732 &dev->adj_list.upper);
5735 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5736 if (!net_eq(net,dev_net(iter->dev)))
5738 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5739 &iter->dev->adj_list.upper);
5740 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5741 &dev->adj_list.lower);
5745 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5747 struct netdev_adjacent *iter;
5749 struct net *net = dev_net(dev);
5751 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5752 if (!net_eq(net,dev_net(iter->dev)))
5754 netdev_adjacent_sysfs_del(iter->dev, oldname,
5755 &iter->dev->adj_list.lower);
5756 netdev_adjacent_sysfs_add(iter->dev, dev,
5757 &iter->dev->adj_list.lower);
5760 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5761 if (!net_eq(net,dev_net(iter->dev)))
5763 netdev_adjacent_sysfs_del(iter->dev, oldname,
5764 &iter->dev->adj_list.upper);
5765 netdev_adjacent_sysfs_add(iter->dev, dev,
5766 &iter->dev->adj_list.upper);
5770 void *netdev_lower_dev_get_private(struct net_device *dev,
5771 struct net_device *lower_dev)
5773 struct netdev_adjacent *lower;
5777 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
5781 return lower->private;
5783 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5786 int dev_get_nest_level(struct net_device *dev,
5787 bool (*type_check)(struct net_device *dev))
5789 struct net_device *lower = NULL;
5790 struct list_head *iter;
5796 netdev_for_each_lower_dev(dev, lower, iter) {
5797 nest = dev_get_nest_level(lower, type_check);
5798 if (max_nest < nest)
5802 if (type_check(dev))
5807 EXPORT_SYMBOL(dev_get_nest_level);
5809 static void dev_change_rx_flags(struct net_device *dev, int flags)
5811 const struct net_device_ops *ops = dev->netdev_ops;
5813 if (ops->ndo_change_rx_flags)
5814 ops->ndo_change_rx_flags(dev, flags);
5817 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5819 unsigned int old_flags = dev->flags;
5825 dev->flags |= IFF_PROMISC;
5826 dev->promiscuity += inc;
5827 if (dev->promiscuity == 0) {
5830 * If inc causes overflow, untouch promisc and return error.
5833 dev->flags &= ~IFF_PROMISC;
5835 dev->promiscuity -= inc;
5836 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5841 if (dev->flags != old_flags) {
5842 pr_info("device %s %s promiscuous mode\n",
5844 dev->flags & IFF_PROMISC ? "entered" : "left");
5845 if (audit_enabled) {
5846 current_uid_gid(&uid, &gid);
5847 audit_log(current->audit_context, GFP_ATOMIC,
5848 AUDIT_ANOM_PROMISCUOUS,
5849 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5850 dev->name, (dev->flags & IFF_PROMISC),
5851 (old_flags & IFF_PROMISC),
5852 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5853 from_kuid(&init_user_ns, uid),
5854 from_kgid(&init_user_ns, gid),
5855 audit_get_sessionid(current));
5858 dev_change_rx_flags(dev, IFF_PROMISC);
5861 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5866 * dev_set_promiscuity - update promiscuity count on a device
5870 * Add or remove promiscuity from a device. While the count in the device
5871 * remains above zero the interface remains promiscuous. Once it hits zero
5872 * the device reverts back to normal filtering operation. A negative inc
5873 * value is used to drop promiscuity on the device.
5874 * Return 0 if successful or a negative errno code on error.
5876 int dev_set_promiscuity(struct net_device *dev, int inc)
5878 unsigned int old_flags = dev->flags;
5881 err = __dev_set_promiscuity(dev, inc, true);
5884 if (dev->flags != old_flags)
5885 dev_set_rx_mode(dev);
5888 EXPORT_SYMBOL(dev_set_promiscuity);
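/* Usage sketch (hypothetical caller, e.g. a packet-capture path):
 *
 *	err = dev_set_promiscuity(dev, 1);	take a promiscuity reference
 *	...
 *	dev_set_promiscuity(dev, -1);		drop it again when done
 */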
5890 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5892 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5896 dev->flags |= IFF_ALLMULTI;
5897 dev->allmulti += inc;
5898 if (dev->allmulti == 0) {
5901 * If inc causes overflow, untouch allmulti and return error.
5904 dev->flags &= ~IFF_ALLMULTI;
5906 dev->allmulti -= inc;
5907 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5912 if (dev->flags ^ old_flags) {
5913 dev_change_rx_flags(dev, IFF_ALLMULTI);
5914 dev_set_rx_mode(dev);
5916 __dev_notify_flags(dev, old_flags,
5917 dev->gflags ^ old_gflags);
5923 * dev_set_allmulti - update allmulti count on a device
5927 * Add or remove reception of all multicast frames to a device. While the
5928 * count in the device remains above zero the interface remains listening
5929 * to all interfaces. Once it hits zero the device reverts back to normal
5930 * filtering operation. A negative @inc value is used to drop the counter
5931 * when releasing a resource needing all multicasts.
5932 * Return 0 if successful or a negative errno code on error.
5935 int dev_set_allmulti(struct net_device *dev, int inc)
5937 return __dev_set_allmulti(dev, inc, true);
5939 EXPORT_SYMBOL(dev_set_allmulti);
5942 * Upload unicast and multicast address lists to device and
5943 * configure RX filtering. When the device doesn't support unicast
5944 * filtering it is put in promiscuous mode while unicast addresses
5947 void __dev_set_rx_mode(struct net_device *dev)
5949 const struct net_device_ops *ops = dev->netdev_ops;
5951 /* dev_open will call this function so the list will stay sane. */
5952 if (!(dev->flags&IFF_UP))
5955 if (!netif_device_present(dev))
5958 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5959 /* Unicast address changes may only happen under the rtnl,
5960 * therefore calling __dev_set_promiscuity here is safe.
5962 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5963 __dev_set_promiscuity(dev, 1, false);
5964 dev->uc_promisc = true;
5965 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5966 __dev_set_promiscuity(dev, -1, false);
5967 dev->uc_promisc = false;
5971 if (ops->ndo_set_rx_mode)
5972 ops->ndo_set_rx_mode(dev);
5975 void dev_set_rx_mode(struct net_device *dev)
5977 netif_addr_lock_bh(dev);
5978 __dev_set_rx_mode(dev);
5979 netif_addr_unlock_bh(dev);
5983 * dev_get_flags - get flags reported to userspace
5986 * Get the combination of flag bits exported through APIs to userspace.
5988 unsigned int dev_get_flags(const struct net_device *dev)
5992 flags = (dev->flags & ~(IFF_PROMISC |
5997 (dev->gflags & (IFF_PROMISC |
6000 if (netif_running(dev)) {
6001 if (netif_oper_up(dev))
6002 flags |= IFF_RUNNING;
6003 if (netif_carrier_ok(dev))
6004 flags |= IFF_LOWER_UP;
6005 if (netif_dormant(dev))
6006 flags |= IFF_DORMANT;
6011 EXPORT_SYMBOL(dev_get_flags);
6013 int __dev_change_flags(struct net_device *dev, unsigned int flags)
6015 unsigned int old_flags = dev->flags;
6021 * Set the flags on our device.
6024 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6025 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6027 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6031 * Load in the correct multicast list now the flags have changed.
6034 if ((old_flags ^ flags) & IFF_MULTICAST)
6035 dev_change_rx_flags(dev, IFF_MULTICAST);
6037 dev_set_rx_mode(dev);
6040 * Have we downed the interface? We handle IFF_UP ourselves
6041 * according to user attempts to set it, rather than blindly
6046 if ((old_flags ^ flags) & IFF_UP)
6047 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
6049 if ((flags ^ dev->gflags) & IFF_PROMISC) {
6050 int inc = (flags & IFF_PROMISC) ? 1 : -1;
6051 unsigned int old_flags = dev->flags;
6053 dev->gflags ^= IFF_PROMISC;
6055 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6056 if (dev->flags != old_flags)
6057 dev_set_rx_mode(dev);
6060 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6061 is important. Some (broken) drivers set IFF_PROMISC when
6062 IFF_ALLMULTI is requested, without asking us and without reporting it.
6064 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
6065 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6067 dev->gflags ^= IFF_ALLMULTI;
6068 __dev_set_allmulti(dev, inc, false);
6074 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6075 unsigned int gchanges)
6077 unsigned int changes = dev->flags ^ old_flags;
6080 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
6082 if (changes & IFF_UP) {
6083 if (dev->flags & IFF_UP)
6084 call_netdevice_notifiers(NETDEV_UP, dev);
6086 call_netdevice_notifiers(NETDEV_DOWN, dev);
6089 if (dev->flags & IFF_UP &&
6090 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6091 struct netdev_notifier_change_info change_info;
6093 change_info.flags_changed = changes;
6094 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6100 * dev_change_flags - change device settings
6102 * @flags: device state flags
6104 * Change settings on a device based on the supplied state flags. The flags are
6105 * in the userspace exported format.
6107 int dev_change_flags(struct net_device *dev, unsigned int flags)
6110 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
6112 ret = __dev_change_flags(dev, flags);
6116 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
6117 __dev_notify_flags(dev, old_flags, changes);
6120 EXPORT_SYMBOL(dev_change_flags);
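/* Usage sketch (hypothetical caller holding RTNL): bring an interface up
 * while preserving its other flag bits.
 *
 *	unsigned int flags = dev_get_flags(dev) | IFF_UP;
 *	int err = dev_change_flags(dev, flags);
 */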
6122 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6124 const struct net_device_ops *ops = dev->netdev_ops;
6126 if (ops->ndo_change_mtu)
6127 return ops->ndo_change_mtu(dev, new_mtu);
6134 * dev_set_mtu - Change maximum transfer unit
6136 * @new_mtu: new transfer unit
6138 * Change the maximum transfer size of the network device.
6140 int dev_set_mtu(struct net_device *dev, int new_mtu)
6144 if (new_mtu == dev->mtu)
6147 /* MTU must be positive. */
6151 if (!netif_device_present(dev))
6154 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6155 err = notifier_to_errno(err);
6159 orig_mtu = dev->mtu;
6160 err = __dev_set_mtu(dev, new_mtu);
6163 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6164 err = notifier_to_errno(err);
6166 /* setting mtu back and notifying everyone again,
6167 * so that they have a chance to revert changes.
6169 __dev_set_mtu(dev, orig_mtu);
6170 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6175 EXPORT_SYMBOL(dev_set_mtu);
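/* Usage sketch (hypothetical value): switch a device to jumbo frames.
 *
 *	int err = dev_set_mtu(dev, 9000);
 *
 * Callers normally hold RTNL, as with the other dev_* setters around here.
 */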
6178 * dev_set_group - Change group this device belongs to
6180 * @new_group: group this device should belong to
6182 void dev_set_group(struct net_device *dev, int new_group)
6184 dev->group = new_group;
6186 EXPORT_SYMBOL(dev_set_group);
6189 * dev_set_mac_address - Change Media Access Control Address
6193 * Change the hardware (MAC) address of the device
6195 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6197 const struct net_device_ops *ops = dev->netdev_ops;
6200 if (!ops->ndo_set_mac_address)
6202 if (sa->sa_family != dev->type)
6204 if (!netif_device_present(dev))
6206 err = ops->ndo_set_mac_address(dev, sa);
6209 dev->addr_assign_type = NET_ADDR_SET;
6210 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
6211 add_device_randomness(dev->dev_addr, dev->addr_len);
6214 EXPORT_SYMBOL(dev_set_mac_address);
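/* Usage sketch (hypothetical "new_mac" buffer of dev->addr_len bytes):
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */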
6217 * dev_change_carrier - Change device carrier
6219 * @new_carrier: new value
6221 * Change device carrier
6223 int dev_change_carrier(struct net_device *dev, bool new_carrier)
6225 const struct net_device_ops *ops = dev->netdev_ops;
6227 if (!ops->ndo_change_carrier)
6229 if (!netif_device_present(dev))
6231 return ops->ndo_change_carrier(dev, new_carrier);
6233 EXPORT_SYMBOL(dev_change_carrier);
6236 * dev_get_phys_port_id - Get device physical port ID
6240 * Get device physical port ID
6242 int dev_get_phys_port_id(struct net_device *dev,
6243 struct netdev_phys_item_id *ppid)
6245 const struct net_device_ops *ops = dev->netdev_ops;
6247 if (!ops->ndo_get_phys_port_id)
6249 return ops->ndo_get_phys_port_id(dev, ppid);
6251 EXPORT_SYMBOL(dev_get_phys_port_id);
6254 * dev_get_phys_port_name - Get device physical port name
6258 * Get device physical port name
6260 int dev_get_phys_port_name(struct net_device *dev,
6261 char *name, size_t len)
6263 const struct net_device_ops *ops = dev->netdev_ops;
6265 if (!ops->ndo_get_phys_port_name)
6267 return ops->ndo_get_phys_port_name(dev, name, len);
6269 EXPORT_SYMBOL(dev_get_phys_port_name);
6272 * dev_change_proto_down - update protocol port state information
6274 * @proto_down: new value
6276 * This info can be used by switch drivers to set the phys state of the
6279 int dev_change_proto_down(struct net_device *dev, bool proto_down)
6281 const struct net_device_ops *ops = dev->netdev_ops;
6283 if (!ops->ndo_change_proto_down)
6285 if (!netif_device_present(dev))
6287 return ops->ndo_change_proto_down(dev, proto_down);
6289 EXPORT_SYMBOL(dev_change_proto_down);
6292 * dev_new_index - allocate an ifindex
6293 * @net: the applicable net namespace
6295 * Returns a suitable unique value for a new device interface
6296 * number. The caller must hold the rtnl semaphore or the
6297 * dev_base_lock to be sure it remains unique.
6299 static int dev_new_index(struct net *net)
6301 int ifindex = net->ifindex;
6305 if (!__dev_get_by_index(net, ifindex))
6306 return net->ifindex = ifindex;
6310 /* Delayed registration/unregisteration */
6311 static LIST_HEAD(net_todo_list);
6312 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
6314 static void net_set_todo(struct net_device *dev)
6316 list_add_tail(&dev->todo_list, &net_todo_list);
6317 dev_net(dev)->dev_unreg_count++;
6320 static void rollback_registered_many(struct list_head *head)
6322 struct net_device *dev, *tmp;
6323 LIST_HEAD(close_head);
6325 BUG_ON(dev_boot_phase);
6328 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
6329 /* Some devices call without registering
6330 * for initialization unwind. Remove those
6331 * devices and proceed with the remaining.
6333 if (dev->reg_state == NETREG_UNINITIALIZED) {
6334 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6338 list_del(&dev->unreg_list);
6341 dev->dismantle = true;
6342 BUG_ON(dev->reg_state != NETREG_REGISTERED);
6345 /* If device is running, close it first. */
6346 list_for_each_entry(dev, head, unreg_list)
6347 list_add_tail(&dev->close_list, &close_head);
6348 dev_close_many(&close_head, true);
6350 list_for_each_entry(dev, head, unreg_list) {
6351 /* And unlink it from device chain. */
6352 unlist_netdevice(dev);
6354 dev->reg_state = NETREG_UNREGISTERING;
6355 on_each_cpu(flush_backlog, dev, 1);
6360 list_for_each_entry(dev, head, unreg_list) {
6361 struct sk_buff *skb = NULL;
6363 /* Shutdown queueing discipline. */
6367 /* Notify protocols, that we are about to destroy
6368 this device. They should clean all the things.
6370 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6372 if (!dev->rtnl_link_ops ||
6373 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6374 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6378 * Flush the unicast and multicast chains
6383 if (dev->netdev_ops->ndo_uninit)
6384 dev->netdev_ops->ndo_uninit(dev);
6387 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
6389 /* Notifier chain MUST detach us all upper devices. */
6390 WARN_ON(netdev_has_any_upper_dev(dev));
6392 /* Remove entries from kobject tree */
6393 netdev_unregister_kobject(dev);
6395 /* Remove XPS queueing entries */
6396 netif_reset_xps_queues_gt(dev, 0);
6402 list_for_each_entry(dev, head, unreg_list)
6406 static void rollback_registered(struct net_device *dev)
6410 list_add(&dev->unreg_list, &single);
6411 rollback_registered_many(&single);
6415 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
6416 struct net_device *upper, netdev_features_t features)
6418 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6419 netdev_features_t feature;
6422 for_each_netdev_feature(&upper_disables, feature_bit) {
6423 feature = __NETIF_F_BIT(feature_bit);
6424 if (!(upper->wanted_features & feature)
6425 && (features & feature)) {
6426 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
6427 &feature, upper->name);
6428 features &= ~feature;
6435 static void netdev_sync_lower_features(struct net_device *upper,
6436 struct net_device *lower, netdev_features_t features)
6438 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6439 netdev_features_t feature;
6442 for_each_netdev_feature(&upper_disables, feature_bit) {
6443 feature = __NETIF_F_BIT(feature_bit);
6444 if (!(features & feature) && (lower->features & feature)) {
6445 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
6446 &feature, lower->name);
6447 lower->wanted_features &= ~feature;
6448 netdev_update_features(lower);
6450 if (unlikely(lower->features & feature))
6451 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
6452 &feature, lower->name);
6457 static netdev_features_t netdev_fix_features(struct net_device *dev,
6458 netdev_features_t features)
6460 /* Fix illegal checksum combinations */
6461 if ((features & NETIF_F_HW_CSUM) &&
6462 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6463 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
6464 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6467 /* TSO requires that SG is present as well. */
6468 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6469 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
6470 features &= ~NETIF_F_ALL_TSO;
6473 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6474 !(features & NETIF_F_IP_CSUM)) {
6475 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6476 features &= ~NETIF_F_TSO;
6477 features &= ~NETIF_F_TSO_ECN;
6480 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6481 !(features & NETIF_F_IPV6_CSUM)) {
6482 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6483 features &= ~NETIF_F_TSO6;
6486 /* TSO ECN requires that TSO is present as well. */
6487 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6488 features &= ~NETIF_F_TSO_ECN;
6490 /* Software GSO depends on SG. */
6491 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6492 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
6493 features &= ~NETIF_F_GSO;
6496 /* UFO needs SG and checksumming */
6497 if (features & NETIF_F_UFO) {
6498 /* maybe split UFO into V4 and V6? */
6499 if (!((features & NETIF_F_GEN_CSUM) ||
6500 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6501 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6503 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6504 features &= ~NETIF_F_UFO;
6507 if (!(features & NETIF_F_SG)) {
6509 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6510 features &= ~NETIF_F_UFO;
6514 #ifdef CONFIG_NET_RX_BUSY_POLL
6515 if (dev->netdev_ops->ndo_busy_poll)
6516 features |= NETIF_F_BUSY_POLL;
6519 features &= ~NETIF_F_BUSY_POLL;
6524 int __netdev_update_features(struct net_device *dev)
6526 struct net_device *upper, *lower;
6527 netdev_features_t features;
6528 struct list_head *iter;
6533 features = netdev_get_wanted_features(dev);
6535 if (dev->netdev_ops->ndo_fix_features)
6536 features = dev->netdev_ops->ndo_fix_features(dev, features);
6538 /* driver might be less strict about feature dependencies */
6539 features = netdev_fix_features(dev, features);
6541 /* some features can't be enabled if they're off on an upper device */
6542 netdev_for_each_upper_dev_rcu(dev, upper, iter)
6543 features = netdev_sync_upper_features(dev, upper, features);
6545 if (dev->features == features)
6548 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6549 &dev->features, &features);
6551 if (dev->netdev_ops->ndo_set_features)
6552 err = dev->netdev_ops->ndo_set_features(dev, features);
6556 if (unlikely(err < 0)) {
6558 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6559 err, &features, &dev->features);
6560 /* return non-0 since some features might have changed and
6561 * it's better to fire a spurious notification than miss it
6567 /* some features must be disabled on lower devices when disabled
6568 * on an upper device (think: bonding master or bridge)
6570 netdev_for_each_lower_dev(dev, lower, iter)
6571 netdev_sync_lower_features(dev, lower, features);
6574 dev->features = features;
6576 return err < 0 ? 0 : 1;
6580 * netdev_update_features - recalculate device features
6581 * @dev: the device to check
6583 * Recalculate dev->features set and send notifications if it
6584 * has changed. Should be called after driver or hardware dependent
6585 * conditions might have changed that influence the features.
6587 void netdev_update_features(struct net_device *dev)
6589 if (__netdev_update_features(dev))
6590 netdev_features_change(dev);
6592 EXPORT_SYMBOL(netdev_update_features);
6595 * netdev_change_features - recalculate device features
6596 * @dev: the device to check
6598 * Recalculate dev->features set and send notifications even
6599 * if they have not changed. Should be called instead of
6600 * netdev_update_features() if also dev->vlan_features might
6601 * have changed to allow the changes to be propagated to stacked
6604 void netdev_change_features(struct net_device *dev)
6606 __netdev_update_features(dev);
6607 netdev_features_change(dev);
6609 EXPORT_SYMBOL(netdev_change_features);
6612 * netif_stacked_transfer_operstate - transfer operstate
6613 * @rootdev: the root or lower level device to transfer state from
6614 * @dev: the device to transfer operstate to
6616 * Transfer operational state from root to device. This is normally
6617 * called when a stacking relationship exists between the root
6618 * device and the device (a leaf device).
6620 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6621 struct net_device *dev)
6623 if (rootdev->operstate == IF_OPER_DORMANT)
6624 netif_dormant_on(dev);
6626 netif_dormant_off(dev);
6628 if (netif_carrier_ok(rootdev)) {
6629 if (!netif_carrier_ok(dev))
6630 netif_carrier_on(dev);
6632 if (netif_carrier_ok(dev))
6633 netif_carrier_off(dev);
6636 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6639 static int netif_alloc_rx_queues(struct net_device *dev)
6641 unsigned int i, count = dev->num_rx_queues;
6642 struct netdev_rx_queue *rx;
6643 size_t sz = count * sizeof(*rx);
6647 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6655 for (i = 0; i < count; i++)
6661 static void netdev_init_one_queue(struct net_device *dev,
6662 struct netdev_queue *queue, void *_unused)
6664 /* Initialize queue lock */
6665 spin_lock_init(&queue->_xmit_lock);
6666 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6667 queue->xmit_lock_owner = -1;
6668 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6671 dql_init(&queue->dql, HZ);
6675 static void netif_free_tx_queues(struct net_device *dev)
6680 static int netif_alloc_netdev_queues(struct net_device *dev)
6682 unsigned int count = dev->num_tx_queues;
6683 struct netdev_queue *tx;
6684 size_t sz = count * sizeof(*tx);
6686 if (count < 1 || count > 0xffff)
6689 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6697 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6698 spin_lock_init(&dev->tx_global_lock);
6703 void netif_tx_stop_all_queues(struct net_device *dev)
6707 for (i = 0; i < dev->num_tx_queues; i++) {
6708 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6709 netif_tx_stop_queue(txq);
6712 EXPORT_SYMBOL(netif_tx_stop_all_queues);
6715 * register_netdevice - register a network device
6716 * @dev: device to register
6718 * Take a completed network device structure and add it to the kernel
6719 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6720 * chain. 0 is returned on success. A negative errno code is returned
6721 * on a failure to set up the device, or if the name is a duplicate.
6723 * Callers must hold the rtnl semaphore. You may want
6724 * register_netdev() instead of this.
6727 * The locking appears insufficient to guarantee two parallel registers
6728 * will not get the same name.
6731 int register_netdevice(struct net_device *dev)
6734 struct net *net = dev_net(dev);
6736 BUG_ON(dev_boot_phase);
6741 /* When net_device's are persistent, this will be fatal. */
6742 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6745 spin_lock_init(&dev->addr_list_lock);
6746 netdev_set_addr_lockdep_class(dev);
6748 ret = dev_get_valid_name(net, dev, dev->name);
6752 /* Init, if this function is available */
6753 if (dev->netdev_ops->ndo_init) {
6754 ret = dev->netdev_ops->ndo_init(dev);
6762 if (((dev->hw_features | dev->features) &
6763 NETIF_F_HW_VLAN_CTAG_FILTER) &&
6764 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6765 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6766 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6773 dev->ifindex = dev_new_index(net);
6774 else if (__dev_get_by_index(net, dev->ifindex))
6777 /* Transfer changeable features to wanted_features and enable
6778 * software offloads (GSO and GRO).
6780 dev->hw_features |= NETIF_F_SOFT_FEATURES;
6781 dev->features |= NETIF_F_SOFT_FEATURES;
6782 dev->wanted_features = dev->features & dev->hw_features;
6784 if (!(dev->flags & IFF_LOOPBACK)) {
6785 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6788 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6790 dev->vlan_features |= NETIF_F_HIGHDMA;
6792 /* Make NETIF_F_SG inheritable to tunnel devices.
6794 dev->hw_enc_features |= NETIF_F_SG;
6796 /* Make NETIF_F_SG inheritable to MPLS.
6798 dev->mpls_features |= NETIF_F_SG;
6800 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6801 ret = notifier_to_errno(ret);
6805 ret = netdev_register_kobject(dev);
6808 dev->reg_state = NETREG_REGISTERED;
6810 __netdev_update_features(dev);
6813 * Default initial state at registry is that the
6814 * device is present.
6817 set_bit(__LINK_STATE_PRESENT, &dev->state);
6819 linkwatch_init_dev(dev);
6821 dev_init_scheduler(dev);
6823 list_netdevice(dev);
6824 add_device_randomness(dev->dev_addr, dev->addr_len);
6826 /* If the device has a permanent device address, the driver should
6827 * set dev_addr, and addr_assign_type should be set to
6828 * NET_ADDR_PERM (default value).
6830 if (dev->addr_assign_type == NET_ADDR_PERM)
6831 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6833 /* Notify protocols, that a new device appeared. */
6834 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6835 ret = notifier_to_errno(ret);
6837 rollback_registered(dev);
6838 dev->reg_state = NETREG_UNREGISTERED;
6841 * Prevent userspace races by waiting until the network
6842 * device is fully set up before sending notifications.
6844 if (!dev->rtnl_link_ops ||
6845 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6846 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6852 if (dev->netdev_ops->ndo_uninit)
6853 dev->netdev_ops->ndo_uninit(dev);
6856 EXPORT_SYMBOL(register_netdevice);
6859 * init_dummy_netdev - init a dummy network device for NAPI
6860 * @dev: device to init
6862 * This takes a network device structure and initializes the minimum
6863 * number of fields so it can be used to schedule NAPI polls without
6864 * registering a full blown interface. This is to be used by drivers
6865 * that need to tie several hardware interfaces to a single NAPI
6866 * poll scheduler due to HW limitations.
6868 int init_dummy_netdev(struct net_device *dev)
6870 /* Clear everything. Note we don't initialize spinlocks
6871 * as they aren't supposed to be taken by any of the
6872 * NAPI code and this dummy netdev is supposed to be
6873 * only ever used for NAPI polls
6875 memset(dev, 0, sizeof(struct net_device));
6877 /* make sure we BUG if trying to hit standard
6878 * register/unregister code path
6880 dev->reg_state = NETREG_DUMMY;
6882 /* NAPI wants this */
6883 INIT_LIST_HEAD(&dev->napi_list);
6885 /* a dummy interface is started by default */
6886 set_bit(__LINK_STATE_PRESENT, &dev->state);
6887 set_bit(__LINK_STATE_START, &dev->state);
6889 /* Note : We don't allocate pcpu_refcnt for dummy devices,
6890 * because users of this 'device' don't need to change
6896 EXPORT_SYMBOL_GPL(init_dummy_netdev);
6900 * register_netdev - register a network device
6901 * @dev: device to register
6903 * Take a completed network device structure and add it to the kernel
6904 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6905 * chain. 0 is returned on success. A negative errno code is returned
6906 * on a failure to set up the device, or if the name is a duplicate.
6908 * This is a wrapper around register_netdevice that takes the rtnl semaphore
6909 * and expands the device name if you passed a format string to alloc_netdev.
6912 int register_netdev(struct net_device *dev)
6917 err = register_netdevice(dev);
6921 EXPORT_SYMBOL(register_netdev);
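/* Minimal registration sketch for a driver (hypothetical my_priv and
 * my_netdev_ops):
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */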
6923 int netdev_refcnt_read(const struct net_device *dev)
6927 for_each_possible_cpu(i)
6928 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6931 EXPORT_SYMBOL(netdev_refcnt_read);
6934 * netdev_wait_allrefs - wait until all references are gone.
6935 * @dev: target net_device
6937 * This is called when unregistering network devices.
6939 * Any protocol or device that holds a reference should register
6940 * for netdevice notification, and cleanup and put back the
6941 * reference if they receive an UNREGISTER event.
6942 * We can get stuck here if buggy protocols don't correctly
6945 static void netdev_wait_allrefs(struct net_device *dev)
6947 unsigned long rebroadcast_time, warning_time;
6950 linkwatch_forget_dev(dev);
6952 rebroadcast_time = warning_time = jiffies;
6953 refcnt = netdev_refcnt_read(dev);
6955 while (refcnt != 0) {
6956 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6959 /* Rebroadcast unregister notification */
6960 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6966 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6967 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6969 /* We must not have linkwatch events
6970 * pending on unregister. If this
6971 * happens, we simply run the queue
6972 * unscheduled, resulting in a noop
6975 linkwatch_run_queue();
6980 rebroadcast_time = jiffies;
6985 refcnt = netdev_refcnt_read(dev);
6987 if (time_after(jiffies, warning_time + 10 * HZ)) {
6988 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6990 warning_time = jiffies;
6999 * register_netdevice(x1);
7000 * register_netdevice(x2);
7002 * unregister_netdevice(y1);
7003 * unregister_netdevice(y2);
7009 * We are invoked by rtnl_unlock().
7010 * This allows us to deal with problems:
7011 * 1) We can delete sysfs objects which invoke hotplug
7012 * without deadlocking with linkwatch via keventd.
7013 * 2) Since we run with the RTNL semaphore not held, we can sleep
7014 * safely in order to wait for the netdev refcnt to drop to zero.
7016 * We must not return until all unregister events added during
7017 * the interval the lock was held have been completed.
7019 void netdev_run_todo(void)
7021 struct list_head list;
7023 /* Snapshot list, allow later requests */
7024 list_replace_init(&net_todo_list, &list);
7029 /* Wait for rcu callbacks to finish before next phase */
7030 if (!list_empty(&list))
7033 while (!list_empty(&list)) {
7034 struct net_device *dev
7035 = list_first_entry(&list, struct net_device, todo_list);
7036 list_del(&dev->todo_list);
7039 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7042 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7043 pr_err("network todo '%s' but state %d\n",
7044 dev->name, dev->reg_state);
7049 dev->reg_state = NETREG_UNREGISTERED;
7051 netdev_wait_allrefs(dev);
7054 BUG_ON(netdev_refcnt_read(dev));
7055 BUG_ON(!list_empty(&dev->ptype_all));
7056 BUG_ON(!list_empty(&dev->ptype_specific));
7057 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7058 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
7059 WARN_ON(dev->dn_ptr);
7061 if (dev->destructor)
7062 dev->destructor(dev);
7064 /* Report a network device has been unregistered */
7066 dev_net(dev)->dev_unreg_count--;
7068 wake_up(&netdev_unregistering_wq);
7070 /* Free network device */
7071 kobject_put(&dev->dev.kobj);
7075 /* Convert net_device_stats to rtnl_link_stats64. They have the same
7076 * fields in the same order, with only the type differing.
7078 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7079 const struct net_device_stats *netdev_stats)
7081 #if BITS_PER_LONG == 64
7082 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
7083 memcpy(stats64, netdev_stats, sizeof(*stats64));
7085 size_t i, n = sizeof(*stats64) / sizeof(u64);
7086 const unsigned long *src = (const unsigned long *)netdev_stats;
7087 u64 *dst = (u64 *)stats64;
7089 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
7090 sizeof(*stats64) / sizeof(u64));
7091 for (i = 0; i < n; i++)
7095 EXPORT_SYMBOL(netdev_stats_to_stats64);
7098 * dev_get_stats - get network device statistics
7099 * @dev: device to get statistics from
7100 * @storage: place to store stats
7102 * Get network statistics from device. Return @storage.
7103 * The device driver may provide its own method by setting
7104 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7105 * otherwise the internal statistics structure is used.
7107 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7108 struct rtnl_link_stats64 *storage)
7110 const struct net_device_ops *ops = dev->netdev_ops;
7112 if (ops->ndo_get_stats64) {
7113 memset(storage, 0, sizeof(*storage));
7114 ops->ndo_get_stats64(dev, storage);
7115 } else if (ops->ndo_get_stats) {
7116 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
7118 netdev_stats_to_stats64(storage, &dev->stats);
7120 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
7121 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
7124 EXPORT_SYMBOL(dev_get_stats);
7126 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
7128 struct netdev_queue *queue = dev_ingress_queue(dev);
7130 #ifdef CONFIG_NET_CLS_ACT
7133 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7136 netdev_init_one_queue(dev, queue, NULL);
7137 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
7138 queue->qdisc_sleeping = &noop_qdisc;
7139 rcu_assign_pointer(dev->ingress_queue, queue);
7144 static const struct ethtool_ops default_ethtool_ops;
7146 void netdev_set_default_ethtool_ops(struct net_device *dev,
7147 const struct ethtool_ops *ops)
7149 if (dev->ethtool_ops == &default_ethtool_ops)
7150 dev->ethtool_ops = ops;
7152 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
7154 void netdev_freemem(struct net_device *dev)
7156 char *addr = (char *)dev - dev->padded;
7162 * alloc_netdev_mqs - allocate network device
7163 * @sizeof_priv: size of private data to allocate space for
7164 * @name: device name format string
7165 * @name_assign_type: origin of device name
7166 * @setup: callback to initialize device
7167 * @txqs: the number of TX subqueues to allocate
7168 * @rxqs: the number of RX subqueues to allocate
7170 * Allocates a struct net_device with private data area for driver use
7171 * and performs basic initialization. Also allocates subqueue structs
7172 * for each queue on the device.
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_SYSFS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->gso_min_segs = 0;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->all_adj_list.upper);
	INIT_LIST_HEAD(&dev->all_adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = 1;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_SYSFS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
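/*
 * Illustrative sketch (not part of this file): drivers normally reach
 * alloc_netdev_mqs() through wrappers such as alloc_netdev() or
 * alloc_etherdev_mq(), passing the private-area size and letting ether_setup()
 * act as the setup callback. The foo_* names and the queue count are
 * hypothetical.
 *
 *	#include <linux/etherdevice.h>
 *
 *	struct foo_priv {
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_create(void)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev_mq(sizeof(struct foo_priv), 4);
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		eth_hw_addr_random(dev);
 *		err = register_netdev(dev);
 *		if (err) {
 *			free_netdev(dev);
 *			return err;
 *		}
 *		return 0;
 *	}
 */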
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kvfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
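/*
 * Illustrative sketch (not part of this file): free_netdev() walks
 * dev->napi_list and deletes any NAPI instances still attached, so a driver
 * that registered them with netif_napi_add() does not have to unwind them on
 * every teardown path. The foo_* names are hypothetical; the napi_struct
 * lives in the private area, which is part of the netdev allocation.
 *
 *	struct foo_priv {
 *		struct napi_struct napi;
 *	};
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		napi_complete(napi);
 *		return 0;
 *	}
 *
 *	static void foo_setup_napi(struct net_device *dev)
 *	{
 *		struct foo_priv *p = netdev_priv(dev);
 *
 *		netif_napi_add(dev, &p->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	}
 */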
/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
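/*
 * Illustrative sketch (not part of this file): synchronize_net() is the
 * grace-period barrier a writer uses after unpublishing data that the
 * packet-receive path reads under rcu_read_lock(); once it returns, no
 * receive-path reader can still hold the old pointer. The foo_* names are
 * hypothetical and the writer is assumed to already hold RTNL.
 *
 *	struct foo_table {
 *		u32 nentries;
 *	};
 *
 *	static struct foo_table __rcu *foo_active;
 *
 *	static void foo_retire_table(void)
 *	{
 *		struct foo_table *old = rtnl_dereference(foo_active);
 *
 *		RCU_INIT_POINTER(foo_active, NULL);
 *		synchronize_net();
 *		kfree(old);
 *	}
 */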
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack allocated list_head,
 *  we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
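/*
 * Illustrative sketch (not part of this file): batching several unregisters
 * so that the notifier and RCU work is amortized over one RTNL cycle. The
 * caller queues devices onto a stack list with unregister_netdevice_queue()
 * and flushes it once; foo_destroy_all() is hypothetical.
 *
 *	static void foo_destroy_all(struct net_device *devs[], int n)
 *	{
 *		LIST_HEAD(kill_list);
 *		int i;
 *
 *		rtnl_lock();
 *		for (i = 0; i < n; i++)
 *			unregister_netdevice_queue(devs[i], &kill_list);
 *		unregister_netdevice_many(&kill_list);
 *		rtnl_unlock();
 *	}
 */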
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
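/*
 * Illustrative sketch (not part of this file): the usual module-exit pairing,
 * where unregister_netdev() takes RTNL itself and free_netdev() releases the
 * allocation afterwards. foo_dev is a hypothetical module-global device.
 *
 *	static struct net_device *foo_dev;
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_netdev(foo_dev);
 *		free_netdev(foo_dev);
 *	}
 *	module_exit(foo_exit);
 */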
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/* Flush the unicast and multicast chains */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		dev->ifindex = dev_new_index(net);

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/* Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
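/*
 * Illustrative sketch (not part of this file): moving a device into another
 * namespace from inside the kernel, under RTNL, resolving the target
 * namespace from a process id. foo_move_to_pid() is hypothetical; the @pat
 * argument lets the core pick a fallback name such as "dev%d" if the current
 * name is already taken in the target namespace.
 *
 *	static int foo_move_to_pid(struct net_device *dev, pid_t pid)
 *	{
 *		struct net *net;
 *		int err;
 *
 *		net = get_net_ns_by_pid(pid);
 *		if (IS_ERR(net))
 *			return PTR_ERR(net);
 *
 *		rtnl_lock();
 *		err = dev_change_net_namespace(dev, net, "dev%d");
 *		rtnl_unlock();
 *		put_net(net);
 *		return err;
 *	}
 */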
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();
	preempt_check_resched_rt();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
		kfree_skb(skb);
	}

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
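/*
 * Illustrative sketch (not part of this file): an aggregating (master) driver
 * recomputing its feature set from its lower devices, in the spirit of the
 * bonding/team drivers. FOO_MASTER_FEATURES and the foo_* names are
 * hypothetical; when there is at least one lower device the "all for all"
 * bits are first narrowed, then each slave is folded in one at a time.
 *
 *	#define FOO_MASTER_FEATURES (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_CSUM)
 *
 *	static netdev_features_t foo_compute_features(struct net_device *lowers[],
 *						       int n)
 *	{
 *		netdev_features_t features = FOO_MASTER_FEATURES;
 *		int i;
 *
 *		if (n)
 *			features &= NETIF_F_ALL_FOR_ALL;
 *		for (i = 0; i < n; i++)
 *			features = netdev_increment_features(features,
 *							     lowers[i]->features,
 *							     FOO_MASTER_FEATURES);
 *		return features;
 *	}
 */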
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);
	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);
	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;
	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;
	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
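/*
 * Illustrative sketch (not part of this file): netdev_drivername() is handy
 * in diagnostics that want to name the driver behind a device, e.g. a
 * watchdog-style warning. foo_warn_stall() is hypothetical.
 *
 *	static void foo_warn_stall(struct net_device *dev)
 *	{
 *		pr_warn("%s (%s): transmit queue appears stuck\n",
 *			dev->name, netdev_drivername(dev));
 *	}
 */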
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;
	__netdev_printk(level, dev, &vaf);
	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	__netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
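/*
 * Illustrative sketch (not part of this file): the per-level helpers defined
 * above give driver messages a consistent "driver bus-id netdev:" prefix when
 * the device has a parent. foo_open() is hypothetical.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		if (!netif_carrier_ok(dev))
 *			netdev_warn(dev, "link is down at open time\n");
 *		netdev_info(dev, "interface opened\n");
 *		netif_start_queue(dev);
 *		return 0;
 *	}
 */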
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();
		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init_raw(&sd->input_pkt_queue);
		skb_queue_head_init_raw(&sd->process_queue);
		skb_queue_head_init_raw(&sd->tofree_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices. Ensure the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_subsys_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);