/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				 - inc module use count of module that owns
 * 				   the kernel socket in case userspace opens
 * 				   socket of same protocol
 * 				 - remove all module support, since netlink is
 * 				   mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"
struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};
/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8
static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
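/* A minimal sketch of the reader side this implies (cf. netlink_lookup()
 * below): pin the socket before leaving the RCU read-side section.
 *
 *	rcu_read_lock();
 *	sk = __netlink_lookup(table, portid, net);
 *	if (sk)
 *		sock_hold(sk);
 *	rcu_read_unlock();
 */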
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) \
	rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;
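/* Map a 1-based multicast group number onto its bit in a 32-bit group
 * bitmask, e.g. group 1 -> 0x00000001 and group 5 -> 0x00000010; group 0
 * means "no group" and yields an empty mask.
 */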
static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb_end_offset(skb);
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	memcpy(skb_put(new, len), skb->data, len);
	return new;
}
int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);
static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found && nt->module)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}
int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	return ret;
}
static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}
static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}
static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}
static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}
static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}
static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;
		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}
	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}
static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};
static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
	p_end	= pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}
static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}
static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}
static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}
static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}
static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}
static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}

	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}
#else /* CONFIG_NETLINK_MMAP */
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */
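/* Userspace configures and maps the rings roughly as follows (illustrative
 * sketch only; the sizes are example values that satisfy the checks in
 * netlink_set_ring() above):
 *
 *	struct nl_mmap_req req = {
 *		.nm_block_size	= 4096,
 *		.nm_block_nr	= 64,
 *		.nm_frame_size	= 2048,
 *		.nm_frame_nr	= 64 * 4096 / 2048,
 *	};
 *	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, 64 * 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 */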
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	{
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
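/* On 64-bit with CONFIG_NET_NS, sizeof(struct netlink_compare_arg) is 16
 * (8 bytes of pnet, 4 bytes of portid, 4 bytes of tail padding); hashing
 * or comparing those uninitialized padding bytes would break lookups, so
 * the key length is computed as offsetof() + sizeof(u32) == 12 instead.
 */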
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;
static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		sock_put(sk);
		goto err;
	}

	/* We need to ensure that the socket is hashed and visible. */
	smp_wmb();
	nlk_sk(sk)->bound = portid;

err:
	release_sock(sk);
	return err;
}
static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	rcu_read_lock();
	if (__netlink_lookup(table, portid, net)) {
		/* Bind collision, search negative portid values. */
		portid = rover--;
		if (rover > -4097)
			rover = -4097;
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created and that the sender of the message
 * has it too.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);
/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created and that the sender of the message
 * has it too.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);
/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in all user namespaces when the
 * netlink socket was created and that the sender of the message
 * has it too.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);
/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap over the network namespace of the
 * socket we received the message from when the netlink socket was
 * created and that the sender of the message has it too.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
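/* A minimal sketch of how a message handler typically uses these helpers;
 * the handler name and the chosen capability are illustrative only.
 */
static int __maybe_unused example_nl_doit(struct sk_buff *skb,
					  struct nlmsghdr *nlh)
{
	/* refuse configuration requests from senders that lack
	 * CAP_NET_ADMIN over the socket's network namespace */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	return 0;
}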
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	long unsigned int groups = nladdr->nl_groups;
	bool bound;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	bound = nlk->bound;
	if (bound) {
		/* Ensure nlk->portid is up-to-date. */
		smp_rmb();

		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;
	}

	if (nlk->netlink_bind && groups) {
		int group;

		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
	if (!bound) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(nlk->ngroups, groups, sk);
			return err;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
	if (!nlk->bound)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else
		skb->destructor = netlink_skb_destructor;

	return skb;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);

		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}
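/* A minimal sketch of the attach/send contract for callers that hold their
 * own socket reference (cf. the mqueue notification code); the helper name
 * is illustrative only.
 */
static int __maybe_unused example_attach_and_send(struct sock *sk,
						  struct sk_buff *skb,
						  struct sock *ssk)
{
	long timeo = MAX_SCHEDULE_TIMEOUT;
	int err;

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)		/* ref dropped: caller must look sk up again */
		return -EAGAIN;
	if (err < 0)		/* skb freed, ref dropped */
		return err;
	return netlink_sendskb(sk, skb);	/* consumes skb and the ref */
}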
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
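/* A minimal sketch of a kernel-side reply via netlink_unicast(); the
 * function name and payload layout are illustrative only.
 */
static int __maybe_unused example_nl_reply(struct sock *nlsk,
					   struct sk_buff *req, u32 seq)
{
	struct sk_buff *rep;
	struct nlmsghdr *nlh;

	rep = nlmsg_new(sizeof(u32), GFP_KERNEL);
	if (rep == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(rep, NETLINK_CB(req).portid, seq, NLMSG_DONE,
			sizeof(u32), 0);
	if (nlh == NULL) {
		kfree_skb(rep);
		return -EMSGSIZE;
	}
	*(u32 *)nlmsg_data(nlh) = 0;

	/* consumes rep on both success and failure */
	return netlink_unicast(nlsk, rep, NETLINK_CB(req).portid,
			       MSG_DONTWAIT);
}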
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	if (ring->frame_size - NL_MMAP_HDRLEN < size)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	/* check again under lock */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;
	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};
static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net))
		return;

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);
}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
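/* A minimal sketch of a kernel-side event notification; the helper name
 * and group number are illustrative only.
 */
static void __maybe_unused example_nl_notify(struct sock *nlsk,
					     struct sk_buff *skb, u32 group)
{
	/* skip the clone/deliver work entirely when nobody listens */
	if (!netlink_has_listeners(nlsk, group)) {
		kfree_skb(skb);
		return;
	}
	/* consumes skb; returns -ESRCH when no socket received it */
	netlink_broadcast(nlsk, skb, 0, group, GFP_KERNEL);
}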
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};
static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);
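/* A minimal sketch of reporting a lost-event condition to a multicast
 * group; the helper name is illustrative only. Note the negative error
 * code, as required by the kernel-doc above.
 */
static void __maybe_unused example_nl_report_overrun(struct sock *nlsk,
						     u32 group)
{
	/* every member of @group that has not opted out via
	 * NETLINK_NO_ENOBUFS sees ENOBUFS on its next socket call */
	netlink_set_err(nlsk, 0, group, -ENOBUFS);
}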
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	err = scm_send(sock, msg, &scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->bound) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	} else {
		/* Ensure nlk is hashed and visible. */
		smp_rmb();
	}

	/* It's a really convoluted way for userland to ask for mmaped
	 * sendmsg(), but that's what we've got...
	 */
	if (netlink_tx_is_mmaped(sk) &&
	    msg->msg_iter.type == ITER_IOVEC &&
	    msg->msg_iter.nr_segs == 1 &&
	    msg->msg_iter.iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   &scm);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = netlink_alloc_large_skb(len, dst_group);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= scm.creds;
	NETLINK_CB(skb).flags	= netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(&scm);
	return err;
}
static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     16384);

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, &scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk)
{
	BUG();
}
/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */
struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			if (cfg->compare)
				nl_table[unit].compare = cfg->compare;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
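/* A minimal sketch of creating a kernel-side netlink socket through the
 * netlink_kernel_create() wrapper; the unit and handler names are
 * illustrative only.
 */
static void __maybe_unused example_nl_input(struct sk_buff *skb)
{
	/* called with one request skb at a time, serialized per socket */
}

static struct sock * __maybe_unused example_nl_sock_create(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= example_nl_input,
	};

	return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
}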
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
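/* Usage sketch (illustrative): most callers build messages through the
 * nlmsg_put() wrapper in <net/netlink.h>, which forwards here.
 * MY_MSG_TYPE and the u32 payload are hypothetical:
 *
 *	struct sk_buff *skb = nlmsg_new(sizeof(u32), GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, sizeof(u32), 0);
 *	if (!nlh) {
 *		nlmsg_free(skb);
 *		return -EMSGSIZE;
 *	}
 *	*(u32 *)nlmsg_data(nlh) = val;
 */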
/*
 * This looks a bit ugly; it would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	cb = &nlk->cb;
	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce the number of system calls on dump operations, if the
	 * user ever provided a big enough buffer.
	 */
	if (alloc_size < nlk->max_recvmsg_len) {
		skb = netlink_alloc_skb(sk,
					nlk->max_recvmsg_len,
					nlk->portid,
					GFP_KERNEL |
					__GFP_NOWARN |
					__GFP_NORETRY);
		/* available room should be exact amount to avoid MSG_TRUNC */
		if (skb)
			skb_reserve(skb, skb_tailroom(skb) -
					 nlk->max_recvmsg_len);
	}
	if (!skb)
		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
					GFP_KERNEL);
	if (!skb)
		goto errout_skb;
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);

	nlk->cb_running = false;
	mutex_unlock(nlk->cb_mutex);
	module_put(cb->module);
	consume_skb(cb->skb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	/* Memory mapped dump requests need to be copied to avoid looping
	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
	 * a reference to the skb.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		skb = skb_copy(skb, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
	} else
		atomic_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	nlk->cb_running = true;

	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;

error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
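/* Usage sketch (illustrative): request handlers normally start dumps via
 * the netlink_dump_start() wrapper, which fills in control->module.
 * my_dump() is a hypothetical callback that returns a positive length
 * while it has more data and 0 once the dump is complete:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *		};
 *		return netlink_dump_start(nlsk, skb, nlh, &c);
 *	}
 *
 * The -EINTR returned above then propagates through netlink_rcv_skb() to
 * suppress the automatic ACK.
 */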
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
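/* The NLMSG_ERROR payload built above is a struct nlmsgerr: an errno-
 * valued "error" field (0 means plain ACK) followed by the original
 * request header, plus the full request when err != 0.  Illustrative
 * userspace sketch, assuming "nlh" points at the received reply:
 *
 *	struct nlmsgerr *e = NLMSG_DATA(nlh);
 *
 *	if (nlh->nlmsg_type == NLMSG_ERROR && e->error != 0)
 *		errno = -e->error;
 */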
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						     struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
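/* Usage sketch (illustrative): a kernel socket's input callback usually
 * just feeds each skb to netlink_rcv_skb() with a per-message handler.
 * my_rcv_msg(), my_input() and MY_MSG_TYPE are hypothetical:
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (nlh->nlmsg_type != MY_MSG_TYPE)
 *			return -EINVAL;
 *		return my_handle_request(skb, nlh);
 *	}
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 */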
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors are reported via the destination sk->sk_err, but
		 * delivery errors are propagated if the
		 * NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
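/* Usage sketch (illustrative): a typical notification path broadcasts to
 * a multicast group and reports back to the requester when NLM_F_ECHO was
 * set, which is essentially what rtnetlink's rtnl_notify() does.  MY_GRP
 * is hypothetical:
 *
 *	int report = nlh->nlmsg_flags & NLM_F_ECHO;
 *
 *	err = nlmsg_notify(nlsk, skb, NETLINK_CB(in_skb).portid,
 *			   MY_GRP, report, GFP_KERNEL);
 */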
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};
static int netlink_walk_start(struct nl_seq_iter *iter)
{
	int err;

	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
	if (err) {
		iter->link = MAX_LINKS;
		return err;
	}

	err = rhashtable_walk_start(&iter->hti);
	return err == -EAGAIN ? 0 : err;
}

static void netlink_walk_stop(struct nl_seq_iter *iter)
{
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);
}
static void *__netlink_seq_next(struct seq_file *seq)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

	do {
		for (;;) {
			int err;

			nlk = rhashtable_walk_next(&iter->hti);

			if (IS_ERR(nlk)) {
				if (PTR_ERR(nlk) == -EAGAIN)
					continue;

				return nlk;
			}

			if (nlk)
				break;

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)
				return NULL;

			err = netlink_walk_start(iter);
			if (err)
				return ERR_PTR(err);
		}
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

	return nlk;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
{
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;
	loff_t pos;
	int err;

	iter->link = 0;

	err = netlink_walk_start(iter);
	if (err)
		return ERR_PTR(err);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

	return obj;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return __netlink_seq_next(seq);
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)
		return;

	netlink_walk_stop(iter);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);

	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}
static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
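/* Usage sketch (illustrative): the chain delivers NETLINK_URELEASE with a
 * struct netlink_notify when a bound user socket is released, so a
 * subsystem can drop per-portid state.  my_netlink_event(), my_nb and
 * MY_PROTO are hypothetical:
 *
 *	static int my_netlink_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct netlink_notify *n = ptr;
 *
 *		if (event == NETLINK_URELEASE && n->protocol == MY_PROTO)
 *			my_cleanup(n->net, n->portid);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netlink_event,
 *	};
 *
 *	netlink_register_notifier(&my_nb);
 */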
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}
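/* NETLINK_USERSOCK has no kernel-side protocol handler; the
 * NL_CFG_F_NONROOT_SEND flag set above lets unprivileged processes send
 * to its multicast groups, so user processes can talk to each other over
 * netlink.  Illustrative userspace sketch:
 *
 *	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_USERSOCK);
 *	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */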
static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}
static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.automatic_shrinking = true,
};
static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);