These changes are a raw update to a vanilla kernel 4.1.10, with the
[kvmfornfv.git] / kernel/net/packet/af_packet.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              PACKET - implements raw packet sockets.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *
12  * Fixes:
13  *              Alan Cox        :       verify_area() now used correctly
14  *              Alan Cox        :       new skbuff lists, look ma no backlogs!
15  *              Alan Cox        :       tidied skbuff lists.
16  *              Alan Cox        :       Now uses generic datagram routines I
17  *                                      added. Also fixed the peek/read crash
18  *                                      from all old Linux datagram code.
19  *              Alan Cox        :       Uses the improved datagram code.
20  *              Alan Cox        :       Added NULL's for socket options.
21  *              Alan Cox        :       Re-commented the code.
22  *              Alan Cox        :       Use new kernel side addressing
23  *              Rob Janssen     :       Correct MTU usage.
24  *              Dave Platt      :       Counter leaks caused by incorrect
25  *                                      interrupt locking and some slightly
26  *                                      dubious gcc output. Can you read
27  *                                      compiler: it said _VOLATILE_
28  *      Richard Kooijman        :       Timestamp fixes.
29  *              Alan Cox        :       New buffers. Use sk->mac.raw.
30  *              Alan Cox        :       sendmsg/recvmsg support.
31  *              Alan Cox        :       Protocol setting support
32  *      Alexey Kuznetsov        :       Untied from IPv4 stack.
33  *      Cyrus Durgin            :       Fixed kerneld for kmod.
34  *      Michal Ostrowski        :       Module initialization cleanup.
35  *         Ulises Alonso        :       Frame number limit removal and
36  *                                      packet_set_ring memory leak.
37  *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
38  *                                      The convention is that longer addresses
39  *                                      will simply extend the hardware address
40  *                                      byte arrays at the end of sockaddr_ll
41  *                                      and packet_mreq.
42  *              Johann Baudy    :       Added TX RING.
43  *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
44  *                                      layer.
45  *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
46  *
47  *
48  *              This program is free software; you can redistribute it and/or
49  *              modify it under the terms of the GNU General Public License
50  *              as published by the Free Software Foundation; either version
51  *              2 of the License, or (at your option) any later version.
52  *
53  */
54
55 #include <linux/types.h>
56 #include <linux/mm.h>
57 #include <linux/capability.h>
58 #include <linux/fcntl.h>
59 #include <linux/socket.h>
60 #include <linux/in.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_packet.h>
64 #include <linux/wireless.h>
65 #include <linux/kernel.h>
66 #include <linux/delay.h>
67 #include <linux/kmod.h>
68 #include <linux/slab.h>
69 #include <linux/vmalloc.h>
70 #include <net/net_namespace.h>
71 #include <net/ip.h>
72 #include <net/protocol.h>
73 #include <linux/skbuff.h>
74 #include <net/sock.h>
75 #include <linux/errno.h>
76 #include <linux/timer.h>
77 #include <asm/uaccess.h>
78 #include <asm/ioctls.h>
79 #include <asm/page.h>
80 #include <asm/cacheflush.h>
81 #include <asm/io.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 #include <linux/poll.h>
85 #include <linux/module.h>
86 #include <linux/init.h>
87 #include <linux/mutex.h>
88 #include <linux/if_vlan.h>
89 #include <linux/virtio_net.h>
90 #include <linux/errqueue.h>
91 #include <linux/net_tstamp.h>
92 #include <linux/percpu.h>
93 #ifdef CONFIG_INET
94 #include <net/inet_common.h>
95 #endif
96
97 #include "internal.h"
98
99 /*
100    Assumptions:
101    - if a device has no dev->hard_header routine, it adds and removes the ll
102      header inside itself. In this case the ll header is invisible outside
103      the device, but higher levels should still reserve dev->hard_header_len.
104      Some devices are clever enough to reallocate the skb when the header
105      will not fit in the reserved space (tunnel); others are silly
106      (PPP).
107    - a packet socket receives packets with the ll header pulled,
108      so SOCK_RAW should push it back.
109
110 On receive:
111 -----------
112
113 Incoming, dev->hard_header!=NULL
114    mac_header -> ll header
115    data       -> data
116
117 Outgoing, dev->hard_header!=NULL
118    mac_header -> ll header
119    data       -> ll header
120
121 Incoming, dev->hard_header==NULL
122    mac_header -> UNKNOWN position. It very likely points to the ll
123                  header.  PPP does this, which is wrong, because it
124                  introduces asymmetry between the rx and tx paths.
125    data       -> data
126
127 Outgoing, dev->hard_header==NULL
128    mac_header -> data. ll header is still not built!
129    data       -> data
130
131 Summary
132   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
133
134
135 On transmit:
136 ------------
137
138 dev->hard_header != NULL
139    mac_header -> ll header
140    data       -> ll header
141
142 dev->hard_header == NULL (ll header is added by device, we cannot control it)
143    mac_header -> data
144    data       -> data
145
146    We should set nh.raw on output to the correct position;
147    the packet classifier depends on it.
148  */
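/* Editorial aside (not part of the original file): a minimal user-space
 * sketch of the rx-side difference described above. With SOCK_RAW the ll
 * (e.g. Ethernet) header is pushed back, so recv() returns it; with
 * SOCK_DGRAM it is not. Error handling is omitted; fd names are assumptions.
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int dgr = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *	recv(raw, buf, sizeof(buf), 0);  // buf[0..13]: Ethernet header
 *	recv(dgr, buf, sizeof(buf), 0);  // buf starts at the IP header;
 *	                                 // ll info comes via sockaddr_ll
 */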
149
150 /* Private packet socket structures. */
151
152 /* identical to struct packet_mreq except it has
153  * a longer address field.
154  */
155 struct packet_mreq_max {
156         int             mr_ifindex;
157         unsigned short  mr_type;
158         unsigned short  mr_alen;
159         unsigned char   mr_address[MAX_ADDR_LEN];
160 };
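/* Editorial aside: packet_mreq_max mirrors the user-visible struct
 * packet_mreq but leaves room for hardware addresses up to MAX_ADDR_LEN.
 * A hedged user-space sketch of the matching setsockopt() ("eth0" and fd
 * are assumptions):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		       &mreq, sizeof(mreq)) < 0)
 *		perror("PACKET_ADD_MEMBERSHIP");
 */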
161
162 union tpacket_uhdr {
163         struct tpacket_hdr  *h1;
164         struct tpacket2_hdr *h2;
165         struct tpacket3_hdr *h3;
166         void *raw;
167 };
168
169 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
170                 int closing, int tx_ring);
171
172 #define V3_ALIGNMENT    (8)
173
174 #define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
175
176 #define BLK_PLUS_PRIV(sz_of_priv) \
177         (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
178
179 #define PGV_FROM_VMALLOC 1
180
181 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
182 #define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
183 #define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
184 #define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
185 #define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
186 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
187 #define BLOCK_PRIV(x)           ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
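/* Editorial aside: a quick worked example of the V3 layout math above,
 * assuming tp_sizeof_priv == 13 (an illustrative value):
 *
 *	ALIGN(13, V3_ALIGNMENT) == 16
 *	BLK_PLUS_PRIV(13)       == BLK_HDR_LEN + 16
 *
 * i.e. the first packet lands after the 8-byte-aligned block header plus
 * the 8-byte-aligned private area; prb_open_block() stores this same
 * offset via BLOCK_O2FP().
 */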
188
189 struct packet_sock;
190 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
191 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
192                        struct packet_type *pt, struct net_device *orig_dev);
193
194 static void *packet_previous_frame(struct packet_sock *po,
195                 struct packet_ring_buffer *rb,
196                 int status);
197 static void packet_increment_head(struct packet_ring_buffer *buff);
198 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
199                         struct tpacket_block_desc *);
200 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
201                         struct packet_sock *);
202 static void prb_retire_current_block(struct tpacket_kbdq_core *,
203                 struct packet_sock *, unsigned int status);
204 static int prb_queue_frozen(struct tpacket_kbdq_core *);
205 static void prb_open_block(struct tpacket_kbdq_core *,
206                 struct tpacket_block_desc *);
207 static void prb_retire_rx_blk_timer_expired(unsigned long);
208 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
209 static void prb_init_blk_timer(struct packet_sock *,
210                 struct tpacket_kbdq_core *,
211                 void (*func) (unsigned long));
212 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
213 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
214                 struct tpacket3_hdr *);
215 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
216                 struct tpacket3_hdr *);
217 static void packet_flush_mclist(struct sock *sk);
218
219 struct packet_skb_cb {
220         union {
221                 struct sockaddr_pkt pkt;
222                 union {
223                         /* Trick: alias skb original length with
224                          * ll.sll_family and ll.protocol in order
225                          * to save room.
226                          */
227                         unsigned int origlen;
228                         struct sockaddr_ll ll;
229                 };
230         } sa;
231 };
232
233 #define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))
234
235 #define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
236 #define GET_PBLOCK_DESC(x, bid) \
237         ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
238 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
239         ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
240 #define GET_NEXT_PRB_BLK_NUM(x) \
241         (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
242         ((x)->kactive_blk_num+1) : 0)
243
244 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
245 static void __fanout_link(struct sock *sk, struct packet_sock *po);
246
247 static int packet_direct_xmit(struct sk_buff *skb)
248 {
249         struct net_device *dev = skb->dev;
250         netdev_features_t features;
251         struct netdev_queue *txq;
252         int ret = NETDEV_TX_BUSY;
253
254         if (unlikely(!netif_running(dev) ||
255                      !netif_carrier_ok(dev)))
256                 goto drop;
257
258         features = netif_skb_features(skb);
259         if (skb_needs_linearize(skb, features) &&
260             __skb_linearize(skb))
261                 goto drop;
262
263         txq = skb_get_tx_queue(dev, skb);
264
265         local_bh_disable();
266
267         HARD_TX_LOCK(dev, txq, smp_processor_id());
268         if (!netif_xmit_frozen_or_drv_stopped(txq))
269                 ret = netdev_start_xmit(skb, dev, txq, false);
270         HARD_TX_UNLOCK(dev, txq);
271
272         local_bh_enable();
273
274         if (!dev_xmit_complete(ret))
275                 kfree_skb(skb);
276
277         return ret;
278 drop:
279         atomic_long_inc(&dev->tx_dropped);
280         kfree_skb(skb);
281         return NET_XMIT_DROP;
282 }
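/* Editorial aside: packet_direct_xmit() becomes po->xmit when user space
 * enables qdisc bypass. A hedged sketch (fd is an assumption):
 *
 *	int one = 1;
 *	if (setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
 *		       &one, sizeof(one)) < 0)
 *		perror("PACKET_QDISC_BYPASS");
 *
 * Subsequent sends then skip the traffic-control layer and hand the skb
 * straight to the driver via netdev_start_xmit(), as above.
 */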
283
284 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
285 {
286         struct net_device *dev;
287
288         rcu_read_lock();
289         dev = rcu_dereference(po->cached_dev);
290         if (likely(dev))
291                 dev_hold(dev);
292         rcu_read_unlock();
293
294         return dev;
295 }
296
297 static void packet_cached_dev_assign(struct packet_sock *po,
298                                      struct net_device *dev)
299 {
300         rcu_assign_pointer(po->cached_dev, dev);
301 }
302
303 static void packet_cached_dev_reset(struct packet_sock *po)
304 {
305         RCU_INIT_POINTER(po->cached_dev, NULL);
306 }
307
308 static bool packet_use_direct_xmit(const struct packet_sock *po)
309 {
310         return po->xmit == packet_direct_xmit;
311 }
312
313 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
314 {
315         return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
316 }
317
318 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
319 {
320         const struct net_device_ops *ops = dev->netdev_ops;
321         u16 queue_index;
322
323         if (ops->ndo_select_queue) {
324                 queue_index = ops->ndo_select_queue(dev, skb, NULL,
325                                                     __packet_pick_tx_queue);
326                 queue_index = netdev_cap_txqueue(dev, queue_index);
327         } else {
328                 queue_index = __packet_pick_tx_queue(dev, skb);
329         }
330
331         skb_set_queue_mapping(skb, queue_index);
332 }
333
334 /* register_prot_hook must be invoked with the po->bind_lock held,
335  * or from a context in which asynchronous accesses to the packet
336  * socket are not possible (packet_create()).
337  */
338 static void register_prot_hook(struct sock *sk)
339 {
340         struct packet_sock *po = pkt_sk(sk);
341
342         if (!po->running) {
343                 if (po->fanout)
344                         __fanout_link(sk, po);
345                 else
346                         dev_add_pack(&po->prot_hook);
347
348                 sock_hold(sk);
349                 po->running = 1;
350         }
351 }
352
353 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
354  * held.   If the sync parameter is true, we will temporarily drop
355  * the po->bind_lock and do a synchronize_net to make sure no
356  * asynchronous packet processing paths still refer to the elements
357  * of po->prot_hook.  If the sync parameter is false, it is the
358  * caller's responsibility to take care of this.
359  */
360 static void __unregister_prot_hook(struct sock *sk, bool sync)
361 {
362         struct packet_sock *po = pkt_sk(sk);
363
364         po->running = 0;
365
366         if (po->fanout)
367                 __fanout_unlink(sk, po);
368         else
369                 __dev_remove_pack(&po->prot_hook);
370
371         __sock_put(sk);
372
373         if (sync) {
374                 spin_unlock(&po->bind_lock);
375                 synchronize_net();
376                 spin_lock(&po->bind_lock);
377         }
378 }
379
380 static void unregister_prot_hook(struct sock *sk, bool sync)
381 {
382         struct packet_sock *po = pkt_sk(sk);
383
384         if (po->running)
385                 __unregister_prot_hook(sk, sync);
386 }
387
388 static inline struct page * __pure pgv_to_page(void *addr)
389 {
390         if (is_vmalloc_addr(addr))
391                 return vmalloc_to_page(addr);
392         return virt_to_page(addr);
393 }
394
395 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
396 {
397         union tpacket_uhdr h;
398
399         h.raw = frame;
400         switch (po->tp_version) {
401         case TPACKET_V1:
402                 h.h1->tp_status = status;
403                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
404                 break;
405         case TPACKET_V2:
406                 h.h2->tp_status = status;
407                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
408                 break;
409         case TPACKET_V3:
410         default:
411                 WARN(1, "TPACKET version not supported.\n");
412                 BUG();
413         }
414
415         smp_wmb();
416 }
417
418 static int __packet_get_status(struct packet_sock *po, void *frame)
419 {
420         union tpacket_uhdr h;
421
422         smp_rmb();
423
424         h.raw = frame;
425         switch (po->tp_version) {
426         case TPACKET_V1:
427                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
428                 return h.h1->tp_status;
429         case TPACKET_V2:
430                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
431                 return h.h2->tp_status;
432         case TPACKET_V3:
433         default:
434                 WARN(1, "TPACKET version not supported.\n");
435                 BUG();
436                 return 0;
437         }
438 }
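/* Editorial aside: the two helpers above are the kernel half of the V1/V2
 * status handshake. A hedged sketch of the user half walking an RX ring
 * (ring, framesz, nframes, pfd and consume_frame() are assumptions):
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * framesz);
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);          // slot still owned by the kernel
 *	consume_frame(hdr);                 // hypothetical helper
 *	hdr->tp_status = TP_STATUS_KERNEL;  // return the slot
 *	i = (i + 1) % nframes;
 */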
439
440 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
441                                    unsigned int flags)
442 {
443         struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
444
445         if (shhwtstamps &&
446             (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
447             ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
448                 return TP_STATUS_TS_RAW_HARDWARE;
449
450         if (ktime_to_timespec_cond(skb->tstamp, ts))
451                 return TP_STATUS_TS_SOFTWARE;
452
453         return 0;
454 }
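/* Editorial aside: po->tp_tstamp is configured with the PACKET_TIMESTAMP
 * socket option. A hedged sketch requesting hardware stamps (fd is an
 * assumption):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * Frames with a driver-provided hw stamp are then flagged
 * TP_STATUS_TS_RAW_HARDWARE; otherwise a software stamp, if present, is
 * reported as TP_STATUS_TS_SOFTWARE, exactly as the helper above decides.
 */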
455
456 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
457                                     struct sk_buff *skb)
458 {
459         union tpacket_uhdr h;
460         struct timespec ts;
461         __u32 ts_status;
462
463         if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
464                 return 0;
465
466         h.raw = frame;
467         switch (po->tp_version) {
468         case TPACKET_V1:
469                 h.h1->tp_sec = ts.tv_sec;
470                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
471                 break;
472         case TPACKET_V2:
473                 h.h2->tp_sec = ts.tv_sec;
474                 h.h2->tp_nsec = ts.tv_nsec;
475                 break;
476         case TPACKET_V3:
477         default:
478                 WARN(1, "TPACKET version not supported.\n");
479                 BUG();
480         }
481
482         /* one flush is safe, as both fields always lie on the same cacheline */
483         flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
484         smp_wmb();
485
486         return ts_status;
487 }
488
489 static void *packet_lookup_frame(struct packet_sock *po,
490                 struct packet_ring_buffer *rb,
491                 unsigned int position,
492                 int status)
493 {
494         unsigned int pg_vec_pos, frame_offset;
495         union tpacket_uhdr h;
496
497         pg_vec_pos = position / rb->frames_per_block;
498         frame_offset = position % rb->frames_per_block;
499
500         h.raw = rb->pg_vec[pg_vec_pos].buffer +
501                 (frame_offset * rb->frame_size);
502
503         if (status != __packet_get_status(po, h.raw))
504                 return NULL;
505
506         return h.raw;
507 }
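/* Editorial aside: a worked example of the index math above, assuming
 * (illustratively) tp_frame_size == 2048 and tp_block_size == 8192, so
 * frames_per_block == 4. Frame position 10 then maps to:
 *
 *	pg_vec_pos   = 10 / 4 == 2              // third block
 *	frame_offset = 10 % 4 == 2              // third frame within it
 *	h.raw        = pg_vec[2].buffer + 2 * 2048
 */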
508
509 static void *packet_current_frame(struct packet_sock *po,
510                 struct packet_ring_buffer *rb,
511                 int status)
512 {
513         return packet_lookup_frame(po, rb, rb->head, status);
514 }
515
516 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
517 {
518         del_timer_sync(&pkc->retire_blk_timer);
519 }
520
521 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
522                 int tx_ring,
523                 struct sk_buff_head *rb_queue)
524 {
525         struct tpacket_kbdq_core *pkc;
526
527         pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
528                         GET_PBDQC_FROM_RB(&po->rx_ring);
529
530         spin_lock_bh(&rb_queue->lock);
531         pkc->delete_blk_timer = 1;
532         spin_unlock_bh(&rb_queue->lock);
533
534         prb_del_retire_blk_timer(pkc);
535 }
536
537 static void prb_init_blk_timer(struct packet_sock *po,
538                 struct tpacket_kbdq_core *pkc,
539                 void (*func) (unsigned long))
540 {
541         init_timer(&pkc->retire_blk_timer);
542         pkc->retire_blk_timer.data = (long)po;
543         pkc->retire_blk_timer.function = func;
544         pkc->retire_blk_timer.expires = jiffies;
545 }
546
547 static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
548 {
549         struct tpacket_kbdq_core *pkc;
550
551         if (tx_ring)
552                 BUG();
553
554         pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
555                         GET_PBDQC_FROM_RB(&po->rx_ring);
556         prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
557 }
558
559 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
560                                 int blk_size_in_bytes)
561 {
562         struct net_device *dev;
563         unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
564         struct ethtool_cmd ecmd;
565         int err;
566         u32 speed;
567
568         rtnl_lock();
569         dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
570         if (unlikely(!dev)) {
571                 rtnl_unlock();
572                 return DEFAULT_PRB_RETIRE_TOV;
573         }
574         err = __ethtool_get_settings(dev, &ecmd);
575         speed = ethtool_cmd_speed(&ecmd);
576         rtnl_unlock();
577         if (!err) {
578                 /*
579                  * If the link speed is that slow, you don't really
580                  * need to worry about perf anyway.
581                  */
582                 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
583                         return DEFAULT_PRB_RETIRE_TOV;
584                 } else {
585                         msec = 1;
586                         div = speed / 1000;
587                 }
588         }
589
590         mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
591
592         if (div)
593                 mbits /= div;
594
595         tmo = mbits * msec;
596
597         if (div)
598                 return tmo+1;
599         return tmo;
600 }
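/* Editorial aside: worked numbers for the heuristic above, assuming a
 * 1 MiB block on a 1 Gbit/s link:
 *
 *	mbits = (1048576 * 8) / (1024 * 1024) == 8
 *	div   = 1000 / 1000 == 1, msec == 1
 *	tmo   = 8 * 1 == 8, returned as tmo + 1 == 9 ms
 *
 * which matches the "~8 ms to fill a block" estimate in the timer-logic
 * comment below; anything slower than 1 Gbit/s just gets
 * DEFAULT_PRB_RETIRE_TOV.
 */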
601
602 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
603                         union tpacket_req_u *req_u)
604 {
605         p1->feature_req_word = req_u->req3.tp_feature_req_word;
606 }
607
608 static void init_prb_bdqc(struct packet_sock *po,
609                         struct packet_ring_buffer *rb,
610                         struct pgv *pg_vec,
611                         union tpacket_req_u *req_u, int tx_ring)
612 {
613         struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
614         struct tpacket_block_desc *pbd;
615
616         memset(p1, 0x0, sizeof(*p1));
617
618         p1->knxt_seq_num = 1;
619         p1->pkbdq = pg_vec;
620         pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
621         p1->pkblk_start = pg_vec[0].buffer;
622         p1->kblk_size = req_u->req3.tp_block_size;
623         p1->knum_blocks = req_u->req3.tp_block_nr;
624         p1->hdrlen = po->tp_hdrlen;
625         p1->version = po->tp_version;
626         p1->last_kactive_blk_num = 0;
627         po->stats.stats3.tp_freeze_q_cnt = 0;
628         if (req_u->req3.tp_retire_blk_tov)
629                 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
630         else
631                 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
632                                                 req_u->req3.tp_block_size);
633         p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
634         p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
635
636         p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
637         prb_init_ft_ops(p1, req_u);
638         prb_setup_retire_blk_timer(po, tx_ring);
639         prb_open_block(p1, pbd);
640 }
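/* Editorial aside: the req3 fields consumed above arrive from user space
 * via setsockopt(PACKET_RX_RING) after PACKET_VERSION is set to TPACKET_V3.
 * A hedged sketch (all sizes are illustrative):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 20,   // 1 MiB per block
 *		.tp_block_nr       = 8,
 *		.tp_frame_size     = 2048,
 *		.tp_frame_nr       = ((1 << 20) / 2048) * 8,
 *		.tp_retire_blk_tov = 10,        // ms; 0 means auto-derive
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */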
641
642 /*  Do NOT update the last_blk_num first.
643  *  Assumes sk_buff_head lock is held.
644  */
645 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
646 {
647         mod_timer(&pkc->retire_blk_timer,
648                         jiffies + pkc->tov_in_jiffies);
649         pkc->last_kactive_blk_num = pkc->kactive_blk_num;
650 }
651
652 /*
653  * Timer logic:
654  * 1) We refresh the timer only when we open a block.
655  *    By doing this we don't waste cycles refreshing the timer
656  *    on a packet-by-packet basis.
657  *
658  * With a 1MB block-size, on a 1Gbps line, it will take
659  * i) ~8 ms to fill a block + ii) memcpy etc.
660  * In this cut we are not accounting for the memcpy time.
661  *
662  * So, if the user sets the 'tmo' to 10ms then the timer
663  * will never fire while the block is still getting filled
664  * (which is what we want). However, the user could choose
665  * to close a block early and that's fine.
666  *
667  * But when the timer does fire, we check whether or not to refresh it.
668  * Since the tmo granularity is in msecs, it is not too expensive
669  * to refresh the timer, let's say every '8' msecs.
670  * Either the user can set the 'tmo' or we can derive it based on
671  * a) line-speed and b) block-size.
672  * prb_calc_retire_blk_tmo() calculates the tmo.
673  *
674  */
675 static void prb_retire_rx_blk_timer_expired(unsigned long data)
676 {
677         struct packet_sock *po = (struct packet_sock *)data;
678         struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
679         unsigned int frozen;
680         struct tpacket_block_desc *pbd;
681
682         spin_lock(&po->sk.sk_receive_queue.lock);
683
684         frozen = prb_queue_frozen(pkc);
685         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
686
687         if (unlikely(pkc->delete_blk_timer))
688                 goto out;
689
690         /* We only need to plug the race when the block is partially filled.
691          * tpacket_rcv:
692          *              lock(); increment BLOCK_NUM_PKTS; unlock()
693          *              copy_bits() is in progress ...
694          *              timer fires on other cpu:
695          *              we can't retire the current block because copy_bits
696          *              is in progress.
697          *
698          */
699         if (BLOCK_NUM_PKTS(pbd)) {
700                 while (atomic_read(&pkc->blk_fill_in_prog)) {
701                         /* Waiting for skb_copy_bits to finish... */
702                         cpu_chill();
703                 }
704         }
705
706         if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
707                 if (!frozen) {
708                         if (!BLOCK_NUM_PKTS(pbd)) {
709                                 /* An empty block. Just refresh the timer. */
710                                 goto refresh_timer;
711                         }
712                         prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
713                         if (!prb_dispatch_next_block(pkc, po))
714                                 goto refresh_timer;
715                         else
716                                 goto out;
717                 } else {
718                         /* Case 1. Queue was frozen because user-space was
719                          *         lagging behind.
720                          */
721                         if (prb_curr_blk_in_use(pkc, pbd)) {
722                                 /*
723                                  * Ok, user-space is still behind.
724                                  * So just refresh the timer.
725                                  */
726                                 goto refresh_timer;
727                         } else {
728                                /* Case 2. Queue was frozen, user-space caught
729                                 * up, now the link went idle && the timer
730                                 * fired. We don't have a block to close. So
731                                 * we open this block and restart the timer.
732                                 * Opening a block thaws the queue and restarts
733                                 * the timer; thawing/timer-refresh is a side effect.
734                                 */
735                                 prb_open_block(pkc, pbd);
736                                 goto out;
737                         }
738                 }
739         }
740
741 refresh_timer:
742         _prb_refresh_rx_retire_blk_timer(pkc);
743
744 out:
745         spin_unlock(&po->sk.sk_receive_queue.lock);
746 }
747
748 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
749                 struct tpacket_block_desc *pbd1, __u32 status)
750 {
751         /* Flush everything minus the block header */
752
753 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
754         u8 *start, *end;
755
756         start = (u8 *)pbd1;
757
758         /* Skip the block header (we know the header WILL fit in 4K) */
759         start += PAGE_SIZE;
760
761         end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
762         for (; start < end; start += PAGE_SIZE)
763                 flush_dcache_page(pgv_to_page(start));
764
765         smp_wmb();
766 #endif
767
768         /* Now update the block status. */
769
770         BLOCK_STATUS(pbd1) = status;
771
772         /* Flush the block header */
773
774 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
775         start = (u8 *)pbd1;
776         flush_dcache_page(pgv_to_page(start));
777
778         smp_wmb();
779 #endif
780 }
781
782 /*
783  * Side effect:
784  *
785  * 1) flush the block
786  * 2) Increment active_blk_num
787  *
788  * Note: We DON'T refresh the timer on purpose,
789  *       because almost always the next block will be opened.
790  */
791 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
792                 struct tpacket_block_desc *pbd1,
793                 struct packet_sock *po, unsigned int stat)
794 {
795         __u32 status = TP_STATUS_USER | stat;
796
797         struct tpacket3_hdr *last_pkt;
798         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
799         struct sock *sk = &po->sk;
800
801         if (po->stats.stats3.tp_drops)
802                 status |= TP_STATUS_LOSING;
803
804         last_pkt = (struct tpacket3_hdr *)pkc1->prev;
805         last_pkt->tp_next_offset = 0;
806
807         /* Get the ts of the last pkt */
808         if (BLOCK_NUM_PKTS(pbd1)) {
809                 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
810                 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
811         } else {
812                 /* Ok, we tmo'd - so get the current time.
813                  *
814                  * It shouldn't really happen as we don't close empty
815                  * blocks. See prb_retire_rx_blk_timer_expired().
816                  */
817                 struct timespec ts;
818                 getnstimeofday(&ts);
819                 h1->ts_last_pkt.ts_sec = ts.tv_sec;
820                 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
821         }
822
823         smp_wmb();
824
825         /* Flush the block */
826         prb_flush_block(pkc1, pbd1, status);
827
828         sk->sk_data_ready(sk);
829
830         pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
831 }
832
833 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
834 {
835         pkc->reset_pending_on_curr_blk = 0;
836 }
837
838 /*
839  * Side effect of opening a block:
840  *
841  * 1) prb_queue is thawed.
842  * 2) retire_blk_timer is refreshed.
843  *
844  */
845 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
846         struct tpacket_block_desc *pbd1)
847 {
848         struct timespec ts;
849         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
850
851         smp_rmb();
852
853         /* We could have just memset this, but then we would lose the
854          * flexibility of making the priv area sticky.
855          */
856
857         BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
858         BLOCK_NUM_PKTS(pbd1) = 0;
859         BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
860
861         getnstimeofday(&ts);
862
863         h1->ts_first_pkt.ts_sec = ts.tv_sec;
864         h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
865
866         pkc1->pkblk_start = (char *)pbd1;
867         pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
868
869         BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
870         BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
871
872         pbd1->version = pkc1->version;
873         pkc1->prev = pkc1->nxt_offset;
874         pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
875
876         prb_thaw_queue(pkc1);
877         _prb_refresh_rx_retire_blk_timer(pkc1);
878
879         smp_wmb();
880 }
881
882 /*
883  * Queue freeze logic:
884  * 1) Assume tp_block_nr = 8 blocks.
885  * 2) At time 't0', user opens Rx ring.
886  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
887  * 4) user-space is either sleeping or processing block '0'.
888  * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
889  *    it will close block-7, loop around and try to fill block '0'.
890  *    call-flow:
891  *    __packet_lookup_frame_in_block
892  *      prb_retire_current_block()
893  *      prb_dispatch_next_block()
894  *        |->(BLOCK_STATUS == USER) evaluates to true
895  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
896  * 6) Now there are two cases:
897  *    6.1) Link goes idle right after the queue is frozen.
898  *         But remember, the last open_block() refreshed the timer.
899  *         When this timer expires, it will refresh itself so that we can
900  *         re-open block-0 in the near future.
901  *    6.2) Link is busy and keeps on receiving packets. This is a simple
902  *         case and __packet_lookup_frame_in_block will check if block-0
903  *         is free and can now be re-used.
904  */
905 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
906                                   struct packet_sock *po)
907 {
908         pkc->reset_pending_on_curr_blk = 1;
909         po->stats.stats3.tp_freeze_q_cnt++;
910 }
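/* Editorial aside: a freeze resolves once user space hands the blocking
 * block back. A hedged sketch of the V3 consumer side (pbd points into
 * the mmap()ed ring; pfd and walk_block() are assumptions):
 *
 *	while (!(pbd->hdr.bh1.block_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);              // wait for a closed block
 *	walk_block(pbd);                        // hypothetical consumer
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;   // hand it back
 *
 * which is exactly the state prb_curr_blk_in_use() tests for below.
 */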
911
912 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
913
914 /*
915  * If the next block is free then we will dispatch it
916  * and return a good offset.
917  * Else, we will freeze the queue.
918  * So, caller must check the return value.
919  */
920 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
921                 struct packet_sock *po)
922 {
923         struct tpacket_block_desc *pbd;
924
925         smp_rmb();
926
927         /* 1. Get current block num */
928         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
929
930         /* 2. If this block is currently in_use then freeze the queue */
931         if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
932                 prb_freeze_queue(pkc, po);
933                 return NULL;
934         }
935
936         /*
937          * 3.
938          * open this block and return the offset where the first packet
939          * needs to get stored.
940          */
941         prb_open_block(pkc, pbd);
942         return (void *)pkc->nxt_offset;
943 }
944
945 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
946                 struct packet_sock *po, unsigned int status)
947 {
948         struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
949
950         /* retire/close the current block */
951         if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
952                 /*
953                  * Plug the case where copy_bits() is in progress on
954                  * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
955                  * have space to copy the pkt in the current block and
956                  * called prb_retire_current_block()
957                  *
958                  * We don't need to worry about the TMO case because
959                  * the timer-handler already handled this case.
960                  */
961                 if (!(status & TP_STATUS_BLK_TMO)) {
962                         while (atomic_read(&pkc->blk_fill_in_prog)) {
963                                 /* Waiting for skb_copy_bits to finish... */
964                                 cpu_chill();
965                         }
966                 }
967                 prb_close_block(pkc, pbd, po, status);
968                 return;
969         }
970 }
971
972 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
973                                       struct tpacket_block_desc *pbd)
974 {
975         return TP_STATUS_USER & BLOCK_STATUS(pbd);
976 }
977
978 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
979 {
980         return pkc->reset_pending_on_curr_blk;
981 }
982
983 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
984 {
985         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
986         atomic_dec(&pkc->blk_fill_in_prog);
987 }
988
989 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
990                         struct tpacket3_hdr *ppd)
991 {
992         ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
993 }
994
995 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
996                         struct tpacket3_hdr *ppd)
997 {
998         ppd->hv1.tp_rxhash = 0;
999 }
1000
1001 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1002                         struct tpacket3_hdr *ppd)
1003 {
1004         if (skb_vlan_tag_present(pkc->skb)) {
1005                 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1006                 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1007                 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1008         } else {
1009                 ppd->hv1.tp_vlan_tci = 0;
1010                 ppd->hv1.tp_vlan_tpid = 0;
1011                 ppd->tp_status = TP_STATUS_AVAILABLE;
1012         }
1013 }
1014
1015 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1016                         struct tpacket3_hdr *ppd)
1017 {
1018         ppd->hv1.tp_padding = 0;
1019         prb_fill_vlan_info(pkc, ppd);
1020
1021         if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1022                 prb_fill_rxhash(pkc, ppd);
1023         else
1024                 prb_clear_rxhash(pkc, ppd);
1025 }
1026
1027 static void prb_fill_curr_block(char *curr,
1028                                 struct tpacket_kbdq_core *pkc,
1029                                 struct tpacket_block_desc *pbd,
1030                                 unsigned int len)
1031 {
1032         struct tpacket3_hdr *ppd;
1033
1034         ppd  = (struct tpacket3_hdr *)curr;
1035         ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1036         pkc->prev = curr;
1037         pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1038         BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1039         BLOCK_NUM_PKTS(pbd) += 1;
1040         atomic_inc(&pkc->blk_fill_in_prog);
1041         prb_run_all_ft_ops(pkc, ppd);
1042 }
1043
1044 /* Assumes caller has the sk->rx_queue.lock */
1045 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1046                                             struct sk_buff *skb,
1047                                                 int status,
1048                                             unsigned int len
1049                                             )
1050 {
1051         struct tpacket_kbdq_core *pkc;
1052         struct tpacket_block_desc *pbd;
1053         char *curr, *end;
1054
1055         pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1056         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1057
1058         /* Queue is frozen when user space is lagging behind */
1059         if (prb_queue_frozen(pkc)) {
1060                 /*
1061                  * Check if the last block, which caused the queue to freeze,
1062                  * is still in use by user-space.
1063                  */
1064                 if (prb_curr_blk_in_use(pkc, pbd)) {
1065                         /* Can't record this packet */
1066                         return NULL;
1067                 } else {
1068                         /*
1069                          * Ok, the block was released by user-space.
1070                          * Now let's open that block.
1071                          * opening a block also thaws the queue.
1072                          * Thawing is a side effect.
1073                          */
1074                         prb_open_block(pkc, pbd);
1075                 }
1076         }
1077
1078         smp_mb();
1079         curr = pkc->nxt_offset;
1080         pkc->skb = skb;
1081         end = (char *)pbd + pkc->kblk_size;
1082
1083         /* first try the current block */
1084         if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1085                 prb_fill_curr_block(curr, pkc, pbd, len);
1086                 return (void *)curr;
1087         }
1088
1089         /* Ok, close the current block */
1090         prb_retire_current_block(pkc, po, 0);
1091
1092         /* Now, try to dispatch the next block */
1093         curr = (char *)prb_dispatch_next_block(pkc, po);
1094         if (curr) {
1095                 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1096                 prb_fill_curr_block(curr, pkc, pbd, len);
1097                 return (void *)curr;
1098         }
1099
1100         /*
1101          * No free blocks are available. User-space hasn't caught up yet.
1102          * Queue was just frozen and now this packet will get dropped.
1103          */
1104         return NULL;
1105 }
1106
1107 static void *packet_current_rx_frame(struct packet_sock *po,
1108                                             struct sk_buff *skb,
1109                                             int status, unsigned int len)
1110 {
1111         char *curr = NULL;
1112         switch (po->tp_version) {
1113         case TPACKET_V1:
1114         case TPACKET_V2:
1115                 curr = packet_lookup_frame(po, &po->rx_ring,
1116                                         po->rx_ring.head, status);
1117                 return curr;
1118         case TPACKET_V3:
1119                 return __packet_lookup_frame_in_block(po, skb, status, len);
1120         default:
1121                 WARN(1, "TPACKET version not supported\n");
1122                 BUG();
1123                 return NULL;
1124         }
1125 }
1126
1127 static void *prb_lookup_block(struct packet_sock *po,
1128                                      struct packet_ring_buffer *rb,
1129                                      unsigned int idx,
1130                                      int status)
1131 {
1132         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1133         struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1134
1135         if (status != BLOCK_STATUS(pbd))
1136                 return NULL;
1137         return pbd;
1138 }
1139
1140 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1141 {
1142         unsigned int prev;
1143         if (rb->prb_bdqc.kactive_blk_num)
1144                 prev = rb->prb_bdqc.kactive_blk_num-1;
1145         else
1146                 prev = rb->prb_bdqc.knum_blocks-1;
1147         return prev;
1148 }
1149
1150 /* Assumes caller has held the rx_queue.lock */
1151 static void *__prb_previous_block(struct packet_sock *po,
1152                                          struct packet_ring_buffer *rb,
1153                                          int status)
1154 {
1155         unsigned int previous = prb_previous_blk_num(rb);
1156         return prb_lookup_block(po, rb, previous, status);
1157 }
1158
1159 static void *packet_previous_rx_frame(struct packet_sock *po,
1160                                              struct packet_ring_buffer *rb,
1161                                              int status)
1162 {
1163         if (po->tp_version <= TPACKET_V2)
1164                 return packet_previous_frame(po, rb, status);
1165
1166         return __prb_previous_block(po, rb, status);
1167 }
1168
1169 static void packet_increment_rx_head(struct packet_sock *po,
1170                                             struct packet_ring_buffer *rb)
1171 {
1172         switch (po->tp_version) {
1173         case TPACKET_V1:
1174         case TPACKET_V2:
1175                 return packet_increment_head(rb);
1176         case TPACKET_V3:
1177         default:
1178                 WARN(1, "TPACKET version not supported.\n");
1179                 BUG();
1180                 return;
1181         }
1182 }
1183
1184 static void *packet_previous_frame(struct packet_sock *po,
1185                 struct packet_ring_buffer *rb,
1186                 int status)
1187 {
1188         unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1189         return packet_lookup_frame(po, rb, previous, status);
1190 }
1191
1192 static void packet_increment_head(struct packet_ring_buffer *buff)
1193 {
1194         buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1195 }
1196
1197 static void packet_inc_pending(struct packet_ring_buffer *rb)
1198 {
1199         this_cpu_inc(*rb->pending_refcnt);
1200 }
1201
1202 static void packet_dec_pending(struct packet_ring_buffer *rb)
1203 {
1204         this_cpu_dec(*rb->pending_refcnt);
1205 }
1206
1207 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1208 {
1209         unsigned int refcnt = 0;
1210         int cpu;
1211
1212         /* We don't use pending refcount in rx_ring. */
1213         if (rb->pending_refcnt == NULL)
1214                 return 0;
1215
1216         for_each_possible_cpu(cpu)
1217                 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1218
1219         return refcnt;
1220 }
1221
1222 static int packet_alloc_pending(struct packet_sock *po)
1223 {
1224         po->rx_ring.pending_refcnt = NULL;
1225
1226         po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1227         if (unlikely(po->tx_ring.pending_refcnt == NULL))
1228                 return -ENOBUFS;
1229
1230         return 0;
1231 }
1232
1233 static void packet_free_pending(struct packet_sock *po)
1234 {
1235         free_percpu(po->tx_ring.pending_refcnt);
1236 }
1237
1238 static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1239 {
1240         struct sock *sk = &po->sk;
1241         bool has_room;
1242
1243         if (po->prot_hook.func != tpacket_rcv)
1244                 return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
1245                         <= sk->sk_rcvbuf;
1246
1247         spin_lock(&sk->sk_receive_queue.lock);
1248         if (po->tp_version == TPACKET_V3)
1249                 has_room = prb_lookup_block(po, &po->rx_ring,
1250                                             po->rx_ring.prb_bdqc.kactive_blk_num,
1251                                             TP_STATUS_KERNEL);
1252         else
1253                 has_room = packet_lookup_frame(po, &po->rx_ring,
1254                                                po->rx_ring.head,
1255                                                TP_STATUS_KERNEL);
1256         spin_unlock(&sk->sk_receive_queue.lock);
1257
1258         return has_room;
1259 }
1260
1261 static void packet_sock_destruct(struct sock *sk)
1262 {
1263         skb_queue_purge(&sk->sk_error_queue);
1264
1265         WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1266         WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1267
1268         if (!sock_flag(sk, SOCK_DEAD)) {
1269                 pr_err("Attempt to release alive packet socket: %p\n", sk);
1270                 return;
1271         }
1272
1273         sk_refcnt_debug_dec(sk);
1274 }
1275
1276 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1277                                       struct sk_buff *skb,
1278                                       unsigned int num)
1279 {
1280         return reciprocal_scale(skb_get_hash(skb), num);
1281 }
1282
1283 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1284                                     struct sk_buff *skb,
1285                                     unsigned int num)
1286 {
1287         unsigned int val = atomic_inc_return(&f->rr_cur);
1288
1289         return val % num;
1290 }
1291
1292 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1293                                      struct sk_buff *skb,
1294                                      unsigned int num)
1295 {
1296         return smp_processor_id() % num;
1297 }
1298
1299 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1300                                      struct sk_buff *skb,
1301                                      unsigned int num)
1302 {
1303         return prandom_u32_max(num);
1304 }
1305
1306 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1307                                           struct sk_buff *skb,
1308                                           unsigned int idx, unsigned int skip,
1309                                           unsigned int num)
1310 {
1311         unsigned int i, j;
1312
1313         i = j = min_t(int, f->next[idx], num - 1);
1314         do {
1315                 if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
1316                         if (i != j)
1317                                 f->next[idx] = i;
1318                         return i;
1319                 }
1320                 if (++i == num)
1321                         i = 0;
1322         } while (i != j);
1323
1324         return idx;
1325 }
1326
1327 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1328                                     struct sk_buff *skb,
1329                                     unsigned int num)
1330 {
1331         return skb_get_queue_mapping(skb) % num;
1332 }
1333
1334 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1335 {
1336         return f->flags & (flag >> 8);
1337 }
1338
1339 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1340                              struct packet_type *pt, struct net_device *orig_dev)
1341 {
1342         struct packet_fanout *f = pt->af_packet_priv;
1343         unsigned int num = READ_ONCE(f->num_members);
1344         struct packet_sock *po;
1345         unsigned int idx;
1346
1347         if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1348             !num) {
1349                 kfree_skb(skb);
1350                 return 0;
1351         }
1352
1353         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1354                 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1355                 if (!skb)
1356                         return 0;
1357         }
1358         switch (f->type) {
1359         case PACKET_FANOUT_HASH:
1360         default:
1361                 idx = fanout_demux_hash(f, skb, num);
1362                 break;
1363         case PACKET_FANOUT_LB:
1364                 idx = fanout_demux_lb(f, skb, num);
1365                 break;
1366         case PACKET_FANOUT_CPU:
1367                 idx = fanout_demux_cpu(f, skb, num);
1368                 break;
1369         case PACKET_FANOUT_RND:
1370                 idx = fanout_demux_rnd(f, skb, num);
1371                 break;
1372         case PACKET_FANOUT_QM:
1373                 idx = fanout_demux_qm(f, skb, num);
1374                 break;
1375         case PACKET_FANOUT_ROLLOVER:
1376                 idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
1377                 break;
1378         }
1379
1380         po = pkt_sk(f->arr[idx]);
1381         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
1382             unlikely(!packet_rcv_has_room(po, skb))) {
1383                 idx = fanout_demux_rollover(f, skb, idx, idx, num);
1384                 po = pkt_sk(f->arr[idx]);
1385         }
1386
1387         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1388 }
1389
1390 DEFINE_MUTEX(fanout_mutex);
1391 EXPORT_SYMBOL_GPL(fanout_mutex);
1392 static LIST_HEAD(fanout_list);
1393
1394 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1395 {
1396         struct packet_fanout *f = po->fanout;
1397
1398         spin_lock(&f->lock);
1399         f->arr[f->num_members] = sk;
1400         smp_wmb();
1401         f->num_members++;
1402         spin_unlock(&f->lock);
1403 }
1404
1405 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1406 {
1407         struct packet_fanout *f = po->fanout;
1408         int i;
1409
1410         spin_lock(&f->lock);
1411         for (i = 0; i < f->num_members; i++) {
1412                 if (f->arr[i] == sk)
1413                         break;
1414         }
1415         BUG_ON(i >= f->num_members);
1416         f->arr[i] = f->arr[f->num_members - 1];
1417         f->num_members--;
1418         spin_unlock(&f->lock);
1419 }
1420
1421 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1422 {
1423         if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
1424                 return true;
1425
1426         return false;
1427 }
1428
1429 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1430 {
1431         struct packet_sock *po = pkt_sk(sk);
1432         struct packet_fanout *f, *match;
1433         u8 type = type_flags & 0xff;
1434         u8 flags = type_flags >> 8;
1435         int err;
1436
1437         switch (type) {
1438         case PACKET_FANOUT_ROLLOVER:
1439                 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1440                         return -EINVAL;
1441         case PACKET_FANOUT_HASH:
1442         case PACKET_FANOUT_LB:
1443         case PACKET_FANOUT_CPU:
1444         case PACKET_FANOUT_RND:
1445         case PACKET_FANOUT_QM:
1446                 break;
1447         default:
1448                 return -EINVAL;
1449         }
1450
1451         if (!po->running)
1452                 return -EINVAL;
1453
1454         if (po->fanout)
1455                 return -EALREADY;
1456
1457         mutex_lock(&fanout_mutex);
1458         match = NULL;
1459         list_for_each_entry(f, &fanout_list, list) {
1460                 if (f->id == id &&
1461                     read_pnet(&f->net) == sock_net(sk)) {
1462                         match = f;
1463                         break;
1464                 }
1465         }
1466         err = -EINVAL;
1467         if (match && match->flags != flags)
1468                 goto out;
1469         if (!match) {
1470                 err = -ENOMEM;
1471                 match = kzalloc(sizeof(*match), GFP_KERNEL);
1472                 if (!match)
1473                         goto out;
1474                 write_pnet(&match->net, sock_net(sk));
1475                 match->id = id;
1476                 match->type = type;
1477                 match->flags = flags;
1478                 atomic_set(&match->rr_cur, 0);
1479                 INIT_LIST_HEAD(&match->list);
1480                 spin_lock_init(&match->lock);
1481                 atomic_set(&match->sk_ref, 0);
1482                 match->prot_hook.type = po->prot_hook.type;
1483                 match->prot_hook.dev = po->prot_hook.dev;
1484                 match->prot_hook.func = packet_rcv_fanout;
1485                 match->prot_hook.af_packet_priv = match;
1486                 match->prot_hook.id_match = match_fanout_group;
1487                 dev_add_pack(&match->prot_hook);
1488                 list_add(&match->list, &fanout_list);
1489         }
1490         err = -EINVAL;
1491         if (match->type == type &&
1492             match->prot_hook.type == po->prot_hook.type &&
1493             match->prot_hook.dev == po->prot_hook.dev) {
1494                 err = -ENOSPC;
1495                 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1496                         __dev_remove_pack(&po->prot_hook);
1497                         po->fanout = match;
1498                         atomic_inc(&match->sk_ref);
1499                         __fanout_link(sk, po);
1500                         err = 0;
1501                 }
1502         }
1503 out:
1504         mutex_unlock(&fanout_mutex);
1505         return err;
1506 }
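/* Editorial aside: fanout groups are joined from user space with the
 * PACKET_FANOUT option; the group id and type share one int. A hedged
 * sketch (group id 42 is an assumption):
 *
 *	int val = 42 | (PACKET_FANOUT_HASH << 16);
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		       &val, sizeof(val)) < 0)
 *		perror("PACKET_FANOUT");
 *
 * Each member socket makes this call; fanout_add() above then links it
 * into the struct packet_fanout matched on (id, netns).
 */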
1507
1508 static void fanout_release(struct sock *sk)
1509 {
1510         struct packet_sock *po = pkt_sk(sk);
1511         struct packet_fanout *f;
1512
1513         f = po->fanout;
1514         if (!f)
1515                 return;
1516
1517         mutex_lock(&fanout_mutex);
1518         po->fanout = NULL;
1519
1520         if (atomic_dec_and_test(&f->sk_ref)) {
1521                 list_del(&f->list);
1522                 dev_remove_pack(&f->prot_hook);
1523                 kfree(f);
1524         }
1525         mutex_unlock(&fanout_mutex);
1526 }
1527
1528 static const struct proto_ops packet_ops;
1529
1530 static const struct proto_ops packet_ops_spkt;
1531
1532 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1533                            struct packet_type *pt, struct net_device *orig_dev)
1534 {
1535         struct sock *sk;
1536         struct sockaddr_pkt *spkt;
1537
1538         /*
1539          *      When we registered the protocol we saved the socket in the data
1540          *      field for just this event.
1541          */
1542
1543         sk = pt->af_packet_priv;
1544
1545         /*
1546          *      Yank back the headers [hope the device set this
1547          *      right or kerboom...]
1548          *
1549          *      Incoming packets have ll header pulled,
1550          *      push it back.
1551          *
1552          *      For outgoing ones skb->data == skb_mac_header(skb),
1553          *      so this procedure is a no-op.
1554          */
1555
1556         if (skb->pkt_type == PACKET_LOOPBACK)
1557                 goto out;
1558
1559         if (!net_eq(dev_net(dev), sock_net(sk)))
1560                 goto out;
1561
1562         skb = skb_share_check(skb, GFP_ATOMIC);
1563         if (skb == NULL)
1564                 goto oom;
1565
1566         /* drop any routing info */
1567         skb_dst_drop(skb);
1568
1569         /* drop conntrack reference */
1570         nf_reset(skb);
1571
1572         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1573
1574         skb_push(skb, skb->data - skb_mac_header(skb));
1575
1576         /*
1577          *      The SOCK_PACKET socket receives _all_ frames.
1578          */
1579
1580         spkt->spkt_family = dev->type;
1581         strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1582         spkt->spkt_protocol = skb->protocol;
1583
1584         /*
1585          *      Charge the memory to the socket. This is done specifically
1586          *      to prevent sockets from using up all the memory.
1587          */
1588
1589         if (sock_queue_rcv_skb(sk, skb) == 0)
1590                 return 0;
1591
1592 out:
1593         kfree_skb(skb);
1594 oom:
1595         return 0;
1596 }
1597
1598
1599 /*
1600  *      Output a raw packet to the device layer. This bypasses all the other
1601  *      protocol layers, and you must therefore supply it with a complete frame.
1602  */
1603
1604 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1605                                size_t len)
1606 {
1607         struct sock *sk = sock->sk;
1608         DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1609         struct sk_buff *skb = NULL;
1610         struct net_device *dev;
1611         __be16 proto = 0;
1612         int err;
1613         int extra_len = 0;
1614
1615         /*
1616          *      Get and verify the address.
1617          */
1618
1619         if (saddr) {
1620                 if (msg->msg_namelen < sizeof(struct sockaddr))
1621                         return -EINVAL;
1622                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1623                         proto = saddr->spkt_protocol;
1624         } else
1625                 return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */
1626
1627         /*
1628          *      Find the device first, so we can size-check against it
1629          */
1630
1631         saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1632 retry:
1633         rcu_read_lock();
1634         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1635         err = -ENODEV;
1636         if (dev == NULL)
1637                 goto out_unlock;
1638
1639         err = -ENETDOWN;
1640         if (!(dev->flags & IFF_UP))
1641                 goto out_unlock;
1642
1643         /*
1644          * You may not queue a frame bigger than the MTU. This is the lowest level
1645          * raw protocol and you must do your own fragmentation at this level.
1646          */
1647
1648         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1649                 if (!netif_supports_nofcs(dev)) {
1650                         err = -EPROTONOSUPPORT;
1651                         goto out_unlock;
1652                 }
1653                 extra_len = 4; /* We're doing our own CRC */
1654         }
1655
1656         err = -EMSGSIZE;
1657         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1658                 goto out_unlock;
1659
1660         if (!skb) {
1661                 size_t reserved = LL_RESERVED_SPACE(dev);
1662                 int tlen = dev->needed_tailroom;
1663                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1664
1665                 rcu_read_unlock();
1666                 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1667                 if (skb == NULL)
1668                         return -ENOBUFS;
1669                 /* FIXME: Save some space for broken drivers that write a hard
1670                  * header at transmission time by themselves. PPP is the notable
1671                  * one here. This should really be fixed at the driver level.
1672                  */
1673                 skb_reserve(skb, reserved);
1674                 skb_reset_network_header(skb);
1675
1676                 /* Try to align data part correctly */
1677                 if (hhlen) {
1678                         skb->data -= hhlen;
1679                         skb->tail -= hhlen;
1680                         if (len < hhlen)
1681                                 skb_reset_network_header(skb);
1682                 }
1683                 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1684                 if (err)
1685                         goto out_free;
1686                 goto retry;
1687         }
1688
1689         if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1690                 /* Earlier code assumed this would be a VLAN pkt,
1691                  * double-check this now that we have the actual
1692                  * packet in hand.
1693                  */
1694                 struct ethhdr *ehdr;
1695                 skb_reset_mac_header(skb);
1696                 ehdr = eth_hdr(skb);
1697                 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1698                         err = -EMSGSIZE;
1699                         goto out_unlock;
1700                 }
1701         }
1702
1703         skb->protocol = proto;
1704         skb->dev = dev;
1705         skb->priority = sk->sk_priority;
1706         skb->mark = sk->sk_mark;
1707
1708         sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1709
1710         if (unlikely(extra_len == 4))
1711                 skb->no_fcs = 1;
1712
1713         skb_probe_transport_header(skb, 0);
1714
1715         dev_queue_xmit(skb);
1716         rcu_read_unlock();
1717         return len;
1718
1719 out_unlock:
1720         rcu_read_unlock();
1721 out_free:
1722         kfree_skb(skb);
1723         return err;
1724 }
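
/*
 * Illustrative userspace sketch (not part of this file): sending one
 * complete frame through the obsolete SOCK_PACKET interface handled by
 * packet_sendmsg_spkt() above. "eth0", frame and frame_len are
 * placeholders; the buffer must contain the full link-layer header.
 */
#if 0
        int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
        struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };

        strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
        spkt.spkt_protocol = htons(ETH_P_IP);
        sendto(fd, frame, frame_len, 0,
               (struct sockaddr *)&spkt, sizeof(spkt));
#endif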
1725
1726 static unsigned int run_filter(const struct sk_buff *skb,
1727                                       const struct sock *sk,
1728                                       unsigned int res)
1729 {
1730         struct sk_filter *filter;
1731
1732         rcu_read_lock();
1733         filter = rcu_dereference(sk->sk_filter);
1734         if (filter != NULL)
1735                 res = SK_RUN_FILTER(filter, skb);
1736         rcu_read_unlock();
1737
1738         return res;
1739 }
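
/*
 * Illustrative userspace sketch (not part of this file): attaching a
 * classic BPF program with SO_ATTACH_FILTER; run_filter() above then
 * executes it for each packet, and the returned value caps the snap
 * length (0 means drop). The single-instruction filter is an example.
 */
#if 0
        struct sock_filter code[] = {
                { 0x06, 0, 0, 96 },     /* BPF_RET | BPF_K: accept 96 bytes */
        };
        struct sock_fprog prog = {
                .len    = 1,
                .filter = code,
        };

        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0)
                perror("SO_ATTACH_FILTER");
#endif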
1740
1741 /*
1742  * This function does lazy skb cloning in the hope that most packets
1743  * are discarded by BPF.
1744  *
1745  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1746  * and skb->cb are mangled. It works because (and until) packets
1747  * falling here are owned by the current CPU. Output packets are cloned
1748  * by dev_queue_xmit_nit(), input packets are processed by net_bh
1749  * sequentially, so if we return the skb to its original state on exit,
1750  * we will not harm anyone.
1751  */
1752
1753 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1754                       struct packet_type *pt, struct net_device *orig_dev)
1755 {
1756         struct sock *sk;
1757         struct sockaddr_ll *sll;
1758         struct packet_sock *po;
1759         u8 *skb_head = skb->data;
1760         int skb_len = skb->len;
1761         unsigned int snaplen, res;
1762
1763         if (skb->pkt_type == PACKET_LOOPBACK)
1764                 goto drop;
1765
1766         sk = pt->af_packet_priv;
1767         po = pkt_sk(sk);
1768
1769         if (!net_eq(dev_net(dev), sock_net(sk)))
1770                 goto drop;
1771
1772         skb->dev = dev;
1773
1774         if (dev->header_ops) {
1775                 /* The device has an explicit notion of ll header,
1776                  * exported to higher levels.
1777                  *
1778                  * Otherwise, the device hides details of its frame
1779                  * structure, so that the corresponding packet head is
1780                  * never delivered to the user.
1781                  */
1782                 if (sk->sk_type != SOCK_DGRAM)
1783                         skb_push(skb, skb->data - skb_mac_header(skb));
1784                 else if (skb->pkt_type == PACKET_OUTGOING) {
1785                         /* Special case: outgoing packets have ll header at head */
1786                         skb_pull(skb, skb_network_offset(skb));
1787                 }
1788         }
1789
1790         snaplen = skb->len;
1791
1792         res = run_filter(skb, sk, snaplen);
1793         if (!res)
1794                 goto drop_n_restore;
1795         if (snaplen > res)
1796                 snaplen = res;
1797
1798         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1799                 goto drop_n_acct;
1800
1801         if (skb_shared(skb)) {
1802                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1803                 if (nskb == NULL)
1804                         goto drop_n_acct;
1805
1806                 if (skb_head != skb->data) {
1807                         skb->data = skb_head;
1808                         skb->len = skb_len;
1809                 }
1810                 consume_skb(skb);
1811                 skb = nskb;
1812         }
1813
1814         sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
1815
1816         sll = &PACKET_SKB_CB(skb)->sa.ll;
1817         sll->sll_hatype = dev->type;
1818         sll->sll_pkttype = skb->pkt_type;
1819         if (unlikely(po->origdev))
1820                 sll->sll_ifindex = orig_dev->ifindex;
1821         else
1822                 sll->sll_ifindex = dev->ifindex;
1823
1824         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1825
1826         /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
1827          * Use their space for storing the original skb length.
1828          */
1829         PACKET_SKB_CB(skb)->sa.origlen = skb->len;
1830
1831         if (pskb_trim(skb, snaplen))
1832                 goto drop_n_acct;
1833
1834         skb_set_owner_r(skb, sk);
1835         skb->dev = NULL;
1836         skb_dst_drop(skb);
1837
1838         /* drop conntrack reference */
1839         nf_reset(skb);
1840
1841         spin_lock(&sk->sk_receive_queue.lock);
1842         po->stats.stats1.tp_packets++;
1843         sock_skb_set_dropcount(sk, skb);
1844         __skb_queue_tail(&sk->sk_receive_queue, skb);
1845         spin_unlock(&sk->sk_receive_queue.lock);
1846         sk->sk_data_ready(sk);
1847         return 0;
1848
1849 drop_n_acct:
1850         spin_lock(&sk->sk_receive_queue.lock);
1851         po->stats.stats1.tp_drops++;
1852         atomic_inc(&sk->sk_drops);
1853         spin_unlock(&sk->sk_receive_queue.lock);
1854
1855 drop_n_restore:
1856         if (skb_head != skb->data && skb_shared(skb)) {
1857                 skb->data = skb_head;
1858                 skb->len = skb_len;
1859         }
1860 drop:
1861         consume_skb(skb);
1862         return 0;
1863 }
1864
1865 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1866                        struct packet_type *pt, struct net_device *orig_dev)
1867 {
1868         struct sock *sk;
1869         struct packet_sock *po;
1870         struct sockaddr_ll *sll;
1871         union tpacket_uhdr h;
1872         u8 *skb_head = skb->data;
1873         int skb_len = skb->len;
1874         unsigned int snaplen, res;
1875         unsigned long status = TP_STATUS_USER;
1876         unsigned short macoff, netoff, hdrlen;
1877         struct sk_buff *copy_skb = NULL;
1878         struct timespec ts;
1879         __u32 ts_status;
1880
1881         /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
1882          * We may add members to them up to the current aligned size without forcing
1883          * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
1884          */
1885         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
1886         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
1887
1888         if (skb->pkt_type == PACKET_LOOPBACK)
1889                 goto drop;
1890
1891         sk = pt->af_packet_priv;
1892         po = pkt_sk(sk);
1893
1894         if (!net_eq(dev_net(dev), sock_net(sk)))
1895                 goto drop;
1896
1897         if (dev->header_ops) {
1898                 if (sk->sk_type != SOCK_DGRAM)
1899                         skb_push(skb, skb->data - skb_mac_header(skb));
1900                 else if (skb->pkt_type == PACKET_OUTGOING) {
1901                         /* Special case: outgoing packets have ll header at head */
1902                         skb_pull(skb, skb_network_offset(skb));
1903                 }
1904         }
1905
1906         snaplen = skb->len;
1907
1908         res = run_filter(skb, sk, snaplen);
1909         if (!res)
1910                 goto drop_n_restore;
1911
1912         if (skb->ip_summed == CHECKSUM_PARTIAL)
1913                 status |= TP_STATUS_CSUMNOTREADY;
1914         else if (skb->pkt_type != PACKET_OUTGOING &&
1915                  (skb->ip_summed == CHECKSUM_COMPLETE ||
1916                   skb_csum_unnecessary(skb)))
1917                 status |= TP_STATUS_CSUM_VALID;
1918
1919         if (snaplen > res)
1920                 snaplen = res;
1921
1922         if (sk->sk_type == SOCK_DGRAM) {
1923                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1924                                   po->tp_reserve;
1925         } else {
1926                 unsigned int maclen = skb_network_offset(skb);
1927                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1928                                        (maclen < 16 ? 16 : maclen)) +
1929                         po->tp_reserve;
1930                 macoff = netoff - maclen;
1931         }
1932         if (po->tp_version <= TPACKET_V2) {
1933                 if (macoff + snaplen > po->rx_ring.frame_size) {
1934                         if (po->copy_thresh &&
1935                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1936                                 if (skb_shared(skb)) {
1937                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
1938                                 } else {
1939                                         copy_skb = skb_get(skb);
1940                                         skb_head = skb->data;
1941                                 }
1942                                 if (copy_skb)
1943                                         skb_set_owner_r(copy_skb, sk);
1944                         }
1945                         snaplen = po->rx_ring.frame_size - macoff;
1946                         if ((int)snaplen < 0)
1947                                 snaplen = 0;
1948                 }
1949         } else if (unlikely(macoff + snaplen >
1950                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
1951                 u32 nval;
1952
1953                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
1954                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
1955                             snaplen, nval, macoff);
1956                 snaplen = nval;
1957                 if (unlikely((int)snaplen < 0)) {
1958                         snaplen = 0;
1959                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
1960                 }
1961         }
1962         spin_lock(&sk->sk_receive_queue.lock);
1963         h.raw = packet_current_rx_frame(po, skb,
1964                                         TP_STATUS_KERNEL, (macoff+snaplen));
1965         if (!h.raw)
1966                 goto ring_is_full;
1967         if (po->tp_version <= TPACKET_V2) {
1968                 packet_increment_rx_head(po, &po->rx_ring);
1969         /*
1970          * LOSING will be reported until you read the stats,
1971          * because it's COR - Clear On Read.
1972          * Anyway, this is done for V1/V2 only, as V3 doesn't need it
1973          * at the packet level.
1974          */
1975                 if (po->stats.stats1.tp_drops)
1976                         status |= TP_STATUS_LOSING;
1977         }
1978         po->stats.stats1.tp_packets++;
1979         if (copy_skb) {
1980                 status |= TP_STATUS_COPY;
1981                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1982         }
1983         spin_unlock(&sk->sk_receive_queue.lock);
1984
1985         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1986
1987         if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
1988                 getnstimeofday(&ts);
1989
1990         status |= ts_status;
1991
1992         switch (po->tp_version) {
1993         case TPACKET_V1:
1994                 h.h1->tp_len = skb->len;
1995                 h.h1->tp_snaplen = snaplen;
1996                 h.h1->tp_mac = macoff;
1997                 h.h1->tp_net = netoff;
1998                 h.h1->tp_sec = ts.tv_sec;
1999                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2000                 hdrlen = sizeof(*h.h1);
2001                 break;
2002         case TPACKET_V2:
2003                 h.h2->tp_len = skb->len;
2004                 h.h2->tp_snaplen = snaplen;
2005                 h.h2->tp_mac = macoff;
2006                 h.h2->tp_net = netoff;
2007                 h.h2->tp_sec = ts.tv_sec;
2008                 h.h2->tp_nsec = ts.tv_nsec;
2009                 if (skb_vlan_tag_present(skb)) {
2010                         h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2011                         h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2012                         status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2013                 } else {
2014                         h.h2->tp_vlan_tci = 0;
2015                         h.h2->tp_vlan_tpid = 0;
2016                 }
2017                 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2018                 hdrlen = sizeof(*h.h2);
2019                 break;
2020         case TPACKET_V3:
2021                 /* tp_next_offset and vlan are already populated above,
2022                  * so don't clear those fields here.
2023                  */
2024                 h.h3->tp_status |= status;
2025                 h.h3->tp_len = skb->len;
2026                 h.h3->tp_snaplen = snaplen;
2027                 h.h3->tp_mac = macoff;
2028                 h.h3->tp_net = netoff;
2029                 h.h3->tp_sec  = ts.tv_sec;
2030                 h.h3->tp_nsec = ts.tv_nsec;
2031                 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2032                 hdrlen = sizeof(*h.h3);
2033                 break;
2034         default:
2035                 BUG();
2036         }
2037
2038         sll = h.raw + TPACKET_ALIGN(hdrlen);
2039         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2040         sll->sll_family = AF_PACKET;
2041         sll->sll_hatype = dev->type;
2042         sll->sll_protocol = skb->protocol;
2043         sll->sll_pkttype = skb->pkt_type;
2044         if (unlikely(po->origdev))
2045                 sll->sll_ifindex = orig_dev->ifindex;
2046         else
2047                 sll->sll_ifindex = dev->ifindex;
2048
2049         smp_mb();
2050
2051 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2052         if (po->tp_version <= TPACKET_V2) {
2053                 u8 *start, *end;
2054
2055                 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2056                                         macoff + snaplen);
2057
2058                 for (start = h.raw; start < end; start += PAGE_SIZE)
2059                         flush_dcache_page(pgv_to_page(start));
2060         }
2061         smp_wmb();
2062 #endif
2063
2064         if (po->tp_version <= TPACKET_V2) {
2065                 __packet_set_status(po, h.raw, status);
2066                 sk->sk_data_ready(sk);
2067         } else {
2068                 prb_clear_blk_fill_status(&po->rx_ring);
2069         }
2070
2071 drop_n_restore:
2072         if (skb_head != skb->data && skb_shared(skb)) {
2073                 skb->data = skb_head;
2074                 skb->len = skb_len;
2075         }
2076 drop:
2077         kfree_skb(skb);
2078         return 0;
2079
2080 ring_is_full:
2081         po->stats.stats1.tp_drops++;
2082         spin_unlock(&sk->sk_receive_queue.lock);
2083
2084         sk->sk_data_ready(sk);
2085         kfree_skb(copy_skb);
2086         goto drop_n_restore;
2087 }
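
/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * TPACKET_V2 RX ring, filled by tpacket_rcv() above. The sizes are
 * examples only and must satisfy the sanity checks in
 * packet_set_ring(); fd is assumed to be a bound packet socket.
 */
#if 0
        int ver = TPACKET_V2;
        struct tpacket_req req = {
                .tp_block_size = 4096,
                .tp_block_nr   = 64,
                .tp_frame_size = 2048,
                .tp_frame_nr   = 128,   /* (block_size / frame_size) * block_nr */
        };

        setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
        setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
        void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                          PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        /* Each frame starts with struct tpacket2_hdr; wait (e.g. poll())
         * until tp_status has TP_STATUS_USER set, consume the frame, then
         * store TP_STATUS_KERNEL to hand the slot back to the kernel.
         */
#endif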
2088
2089 static void tpacket_destruct_skb(struct sk_buff *skb)
2090 {
2091         struct packet_sock *po = pkt_sk(skb->sk);
2092
2093         if (likely(po->tx_ring.pg_vec)) {
2094                 void *ph;
2095                 __u32 ts;
2096
2097                 ph = skb_shinfo(skb)->destructor_arg;
2098                 packet_dec_pending(&po->tx_ring);
2099
2100                 ts = __packet_set_timestamp(po, ph, skb);
2101                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2102         }
2103
2104         sock_wfree(skb);
2105 }
2106
2107 static bool ll_header_truncated(const struct net_device *dev, int len)
2108 {
2109         /* the net device doesn't like an empty head */
2110         if (unlikely(len <= dev->hard_header_len)) {
2111                 net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
2112                                      current->comm, len, dev->hard_header_len);
2113                 return true;
2114         }
2115
2116         return false;
2117 }
2118
2119 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2120                 void *frame, struct net_device *dev, int size_max,
2121                 __be16 proto, unsigned char *addr, int hlen)
2122 {
2123         union tpacket_uhdr ph;
2124         int to_write, offset, len, tp_len, nr_frags, len_max;
2125         struct socket *sock = po->sk.sk_socket;
2126         struct page *page;
2127         void *data;
2128         int err;
2129
2130         ph.raw = frame;
2131
2132         skb->protocol = proto;
2133         skb->dev = dev;
2134         skb->priority = po->sk.sk_priority;
2135         skb->mark = po->sk.sk_mark;
2136         sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2137         skb_shinfo(skb)->destructor_arg = ph.raw;
2138
2139         switch (po->tp_version) {
2140         case TPACKET_V2:
2141                 tp_len = ph.h2->tp_len;
2142                 break;
2143         default:
2144                 tp_len = ph.h1->tp_len;
2145                 break;
2146         }
2147         if (unlikely(tp_len > size_max)) {
2148                 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2149                 return -EMSGSIZE;
2150         }
2151
2152         skb_reserve(skb, hlen);
2153         skb_reset_network_header(skb);
2154
2155         if (!packet_use_direct_xmit(po))
2156                 skb_probe_transport_header(skb, 0);
2157         if (unlikely(po->tp_tx_has_off)) {
2158                 int off_min, off_max, off;
2159                 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2160                 off_max = po->tx_ring.frame_size - tp_len;
2161                 if (sock->type == SOCK_DGRAM) {
2162                         switch (po->tp_version) {
2163                         case TPACKET_V2:
2164                                 off = ph.h2->tp_net;
2165                                 break;
2166                         default:
2167                                 off = ph.h1->tp_net;
2168                                 break;
2169                         }
2170                 } else {
2171                         switch (po->tp_version) {
2172                         case TPACKET_V2:
2173                                 off = ph.h2->tp_mac;
2174                                 break;
2175                         default:
2176                                 off = ph.h1->tp_mac;
2177                                 break;
2178                         }
2179                 }
2180                 if (unlikely((off < off_min) || (off_max < off)))
2181                         return -EINVAL;
2182                 data = ph.raw + off;
2183         } else {
2184                 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2185         }
2186         to_write = tp_len;
2187
2188         if (sock->type == SOCK_DGRAM) {
2189                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2190                                 NULL, tp_len);
2191                 if (unlikely(err < 0))
2192                         return -EINVAL;
2193         } else if (dev->hard_header_len) {
2194                 if (ll_header_truncated(dev, tp_len))
2195                         return -EINVAL;
2196
2197                 skb_push(skb, dev->hard_header_len);
2198                 err = skb_store_bits(skb, 0, data,
2199                                 dev->hard_header_len);
2200                 if (unlikely(err))
2201                         return err;
2202
2203                 data += dev->hard_header_len;
2204                 to_write -= dev->hard_header_len;
2205         }
2206
2207         offset = offset_in_page(data);
2208         len_max = PAGE_SIZE - offset;
2209         len = ((to_write > len_max) ? len_max : to_write);
2210
2211         skb->data_len = to_write;
2212         skb->len += to_write;
2213         skb->truesize += to_write;
2214         atomic_add(to_write, &po->sk.sk_wmem_alloc);
2215
2216         while (likely(to_write)) {
2217                 nr_frags = skb_shinfo(skb)->nr_frags;
2218
2219                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2220                         pr_err("Packet exceeds the number of skb frags (%lu)\n",
2221                                MAX_SKB_FRAGS);
2222                         return -EFAULT;
2223                 }
2224
2225                 page = pgv_to_page(data);
2226                 data += len;
2227                 flush_dcache_page(page);
2228                 get_page(page);
2229                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2230                 to_write -= len;
2231                 offset = 0;
2232                 len_max = PAGE_SIZE;
2233                 len = ((to_write > len_max) ? len_max : to_write);
2234         }
2235
2236         return tp_len;
2237 }
2238
2239 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2240 {
2241         struct sk_buff *skb;
2242         struct net_device *dev;
2243         __be16 proto;
2244         int err, reserve = 0;
2245         void *ph;
2246         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2247         bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2248         int tp_len, size_max;
2249         unsigned char *addr;
2250         int len_sum = 0;
2251         int status = TP_STATUS_AVAILABLE;
2252         int hlen, tlen;
2253
2254         mutex_lock(&po->pg_vec_lock);
2255
2256         if (likely(saddr == NULL)) {
2257                 dev     = packet_cached_dev_get(po);
2258                 proto   = po->num;
2259                 addr    = NULL;
2260         } else {
2261                 err = -EINVAL;
2262                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2263                         goto out;
2264                 if (msg->msg_namelen < (saddr->sll_halen
2265                                         + offsetof(struct sockaddr_ll,
2266                                                 sll_addr)))
2267                         goto out;
2268                 proto   = saddr->sll_protocol;
2269                 addr    = saddr->sll_addr;
2270                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2271         }
2272
2273         err = -ENXIO;
2274         if (unlikely(dev == NULL))
2275                 goto out;
2276         err = -ENETDOWN;
2277         if (unlikely(!(dev->flags & IFF_UP)))
2278                 goto out_put;
2279
2280         reserve = dev->hard_header_len + VLAN_HLEN;
2281         size_max = po->tx_ring.frame_size
2282                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2283
2284         if (size_max > dev->mtu + reserve)
2285                 size_max = dev->mtu + reserve;
2286
2287         do {
2288                 ph = packet_current_frame(po, &po->tx_ring,
2289                                           TP_STATUS_SEND_REQUEST);
2290                 if (unlikely(ph == NULL)) {
2291                         if (need_wait && need_resched())
2292                                 schedule();
2293                         continue;
2294                 }
2295
2296                 status = TP_STATUS_SEND_REQUEST;
2297                 hlen = LL_RESERVED_SPACE(dev);
2298                 tlen = dev->needed_tailroom;
2299                 skb = sock_alloc_send_skb(&po->sk,
2300                                 hlen + tlen + sizeof(struct sockaddr_ll),
2301                                 !need_wait, &err);
2302
2303                 if (unlikely(skb == NULL)) {
2304                         /* we assume the socket was initially writable ... */
2305                         if (likely(len_sum > 0))
2306                                 err = len_sum;
2307                         goto out_status;
2308                 }
2309                 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2310                                           addr, hlen);
2311                 if (likely(tp_len >= 0) &&
2312                     tp_len > dev->mtu + dev->hard_header_len) {
2313                         struct ethhdr *ehdr;
2314                         /* Earlier code assumed this would be a VLAN pkt,
2315                          * double-check this now that we have the actual
2316                          * packet in hand.
2317                          */
2318
2319                         skb_reset_mac_header(skb);
2320                         ehdr = eth_hdr(skb);
2321                         if (ehdr->h_proto != htons(ETH_P_8021Q))
2322                                 tp_len = -EMSGSIZE;
2323                 }
2324                 if (unlikely(tp_len < 0)) {
2325                         if (po->tp_loss) {
2326                                 __packet_set_status(po, ph,
2327                                                 TP_STATUS_AVAILABLE);
2328                                 packet_increment_head(&po->tx_ring);
2329                                 kfree_skb(skb);
2330                                 continue;
2331                         } else {
2332                                 status = TP_STATUS_WRONG_FORMAT;
2333                                 err = tp_len;
2334                                 goto out_status;
2335                         }
2336                 }
2337
2338                 packet_pick_tx_queue(dev, skb);
2339
2340                 skb->destructor = tpacket_destruct_skb;
2341                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2342                 packet_inc_pending(&po->tx_ring);
2343
2344                 status = TP_STATUS_SEND_REQUEST;
2345                 err = po->xmit(skb);
2346                 if (unlikely(err > 0)) {
2347                         err = net_xmit_errno(err);
2348                         if (err && __packet_get_status(po, ph) ==
2349                                    TP_STATUS_AVAILABLE) {
2350                                 /* skb was destructed already */
2351                                 skb = NULL;
2352                                 goto out_status;
2353                         }
2354                         /*
2355                          * skb was dropped but not destructed yet;
2356                          * let's treat it like congestion or err < 0
2357                          */
2358                         err = 0;
2359                 }
2360                 packet_increment_head(&po->tx_ring);
2361                 len_sum += tp_len;
2362         } while (likely((ph != NULL) ||
2363                 /* Note: packet_read_pending() might be slow if we have
2364                  * to call it, as it's a per-CPU variable, but in the fast path
2365                  * we already short-circuit the loop with the first
2366                  * condition and luckily don't have to take that path
2367                  * anyway.
2368                  */
2369                  (need_wait && packet_read_pending(&po->tx_ring))));
2370
2371         err = len_sum;
2372         goto out_put;
2373
2374 out_status:
2375         __packet_set_status(po, ph, status);
2376         kfree_skb(skb);
2377 out_put:
2378         dev_put(dev);
2379 out:
2380         mutex_unlock(&po->pg_vec_lock);
2381         return err;
2382 }
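
/*
 * Illustrative userspace sketch (not part of this file): queueing one
 * frame on a PACKET_TX_RING serviced by tpacket_snd() above. Ring
 * setup mirrors the RX case (PACKET_TX_RING instead); ring, frame and
 * frame_len are placeholders. The data offset matches the
 * !tp_tx_has_off path of tpacket_fill_skb() above.
 */
#if 0
        struct tpacket2_hdr *hdr = ring;        /* first TP_STATUS_AVAILABLE slot */
        void *data = (char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

        memcpy(data, frame, frame_len);         /* complete link-layer frame */
        hdr->tp_len = frame_len;
        hdr->tp_status = TP_STATUS_SEND_REQUEST;
        send(fd, NULL, 0, 0);                   /* kick the transmit loop */
#endif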
2383
2384 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2385                                         size_t reserve, size_t len,
2386                                         size_t linear, int noblock,
2387                                         int *err)
2388 {
2389         struct sk_buff *skb;
2390
2391         /* Under a page?  Don't bother with paged skb. */
2392         if (prepad + len < PAGE_SIZE || !linear)
2393                 linear = len;
2394
2395         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2396                                    err, 0);
2397         if (!skb)
2398                 return NULL;
2399
2400         skb_reserve(skb, reserve);
2401         skb_put(skb, linear);
2402         skb->data_len = len - linear;
2403         skb->len += len - linear;
2404
2405         return skb;
2406 }
2407
2408 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2409 {
2410         struct sock *sk = sock->sk;
2411         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2412         struct sk_buff *skb;
2413         struct net_device *dev;
2414         __be16 proto;
2415         unsigned char *addr;
2416         int err, reserve = 0;
2417         struct virtio_net_hdr vnet_hdr = { 0 };
2418         int offset = 0;
2419         int vnet_hdr_len;
2420         struct packet_sock *po = pkt_sk(sk);
2421         unsigned short gso_type = 0;
2422         int hlen, tlen;
2423         int extra_len = 0;
2424         ssize_t n;
2425
2426         /*
2427          *      Get and verify the address.
2428          */
2429
2430         if (likely(saddr == NULL)) {
2431                 dev     = packet_cached_dev_get(po);
2432                 proto   = po->num;
2433                 addr    = NULL;
2434         } else {
2435                 err = -EINVAL;
2436                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2437                         goto out;
2438                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2439                         goto out;
2440                 proto   = saddr->sll_protocol;
2441                 addr    = saddr->sll_addr;
2442                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2443         }
2444
2445         err = -ENXIO;
2446         if (unlikely(dev == NULL))
2447                 goto out_unlock;
2448         err = -ENETDOWN;
2449         if (unlikely(!(dev->flags & IFF_UP)))
2450                 goto out_unlock;
2451
2452         if (sock->type == SOCK_RAW)
2453                 reserve = dev->hard_header_len;
2454         if (po->has_vnet_hdr) {
2455                 vnet_hdr_len = sizeof(vnet_hdr);
2456
2457                 err = -EINVAL;
2458                 if (len < vnet_hdr_len)
2459                         goto out_unlock;
2460
2461                 len -= vnet_hdr_len;
2462
2463                 err = -EFAULT;
2464                 n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
2465                 if (n != vnet_hdr_len)
2466                         goto out_unlock;
2467
2468                 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2469                     (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
2470                      __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
2471                       __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
2472                         vnet_hdr.hdr_len = __cpu_to_virtio16(false,
2473                                  __virtio16_to_cpu(false, vnet_hdr.csum_start) +
2474                                 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
2475
2476                 err = -EINVAL;
2477                 if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
2478                         goto out_unlock;
2479
2480                 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2481                         switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2482                         case VIRTIO_NET_HDR_GSO_TCPV4:
2483                                 gso_type = SKB_GSO_TCPV4;
2484                                 break;
2485                         case VIRTIO_NET_HDR_GSO_TCPV6:
2486                                 gso_type = SKB_GSO_TCPV6;
2487                                 break;
2488                         case VIRTIO_NET_HDR_GSO_UDP:
2489                                 gso_type = SKB_GSO_UDP;
2490                                 break;
2491                         default:
2492                                 goto out_unlock;
2493                         }
2494
2495                         if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2496                                 gso_type |= SKB_GSO_TCP_ECN;
2497
2498                         if (vnet_hdr.gso_size == 0)
2499                                 goto out_unlock;
2500
2501                 }
2502         }
2503
2504         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2505                 if (!netif_supports_nofcs(dev)) {
2506                         err = -EPROTONOSUPPORT;
2507                         goto out_unlock;
2508                 }
2509                 extra_len = 4; /* We're doing our own CRC */
2510         }
2511
2512         err = -EMSGSIZE;
2513         if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2514                 goto out_unlock;
2515
2516         err = -ENOBUFS;
2517         hlen = LL_RESERVED_SPACE(dev);
2518         tlen = dev->needed_tailroom;
2519         skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2520                                __virtio16_to_cpu(false, vnet_hdr.hdr_len),
2521                                msg->msg_flags & MSG_DONTWAIT, &err);
2522         if (skb == NULL)
2523                 goto out_unlock;
2524
2525         skb_set_network_header(skb, reserve);
2526
2527         err = -EINVAL;
2528         if (sock->type == SOCK_DGRAM) {
2529                 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2530                 if (unlikely(offset < 0))
2531                         goto out_free;
2532         } else {
2533                 if (ll_header_truncated(dev, len))
2534                         goto out_free;
2535         }
2536
2537         /* Returns -EFAULT on error */
2538         err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2539         if (err)
2540                 goto out_free;
2541
2542         sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2543
2544         if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2545                 /* Earlier code assumed this would be a VLAN pkt,
2546                  * double-check this now that we have the actual
2547                  * packet in hand.
2548                  */
2549                 struct ethhdr *ehdr;
2550                 skb_reset_mac_header(skb);
2551                 ehdr = eth_hdr(skb);
2552                 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2553                         err = -EMSGSIZE;
2554                         goto out_free;
2555                 }
2556         }
2557
2558         skb->protocol = proto;
2559         skb->dev = dev;
2560         skb->priority = sk->sk_priority;
2561         skb->mark = sk->sk_mark;
2562
2563         packet_pick_tx_queue(dev, skb);
2564
2565         if (po->has_vnet_hdr) {
2566                 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2567                         u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
2568                         u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
2569                         if (!skb_partial_csum_set(skb, s, o)) {
2570                                 err = -EINVAL;
2571                                 goto out_free;
2572                         }
2573                 }
2574
2575                 skb_shinfo(skb)->gso_size =
2576                         __virtio16_to_cpu(false, vnet_hdr.gso_size);
2577                 skb_shinfo(skb)->gso_type = gso_type;
2578
2579                 /* Header must be checked, and gso_segs computed. */
2580                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2581                 skb_shinfo(skb)->gso_segs = 0;
2582
2583                 len += vnet_hdr_len;
2584         }
2585
2586         if (!packet_use_direct_xmit(po))
2587                 skb_probe_transport_header(skb, reserve);
2588         if (unlikely(extra_len == 4))
2589                 skb->no_fcs = 1;
2590
2591         err = po->xmit(skb);
2592         if (err > 0 && (err = net_xmit_errno(err)) != 0)
2593                 goto out_unlock;
2594
2595         dev_put(dev);
2596
2597         return len;
2598
2599 out_free:
2600         kfree_skb(skb);
2601 out_unlock:
2602         if (dev)
2603                 dev_put(dev);
2604 out:
2605         return err;
2606 }
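
/*
 * Illustrative userspace sketch (not part of this file): one sendto()
 * on a SOCK_DGRAM packet socket, handled by packet_snd() above; the
 * kernel builds the link-layer header from sll_addr. The destination
 * MAC, "eth0", payload and payload_len are placeholders.
 */
#if 0
        struct sockaddr_ll dst = {
                .sll_family   = AF_PACKET,
                .sll_protocol = htons(ETH_P_IP),
                .sll_ifindex  = if_nametoindex("eth0"),
                .sll_halen    = ETH_ALEN,
                .sll_addr     = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
        };

        sendto(fd, payload, payload_len, 0,
               (struct sockaddr *)&dst, sizeof(dst));
#endif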
2607
2608 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2609 {
2610         struct sock *sk = sock->sk;
2611         struct packet_sock *po = pkt_sk(sk);
2612
2613         if (po->tx_ring.pg_vec)
2614                 return tpacket_snd(po, msg);
2615         else
2616                 return packet_snd(sock, msg, len);
2617 }
2618
2619 /*
2620  *      Close a PACKET socket. This is fairly simple. We immediately go
2621  *      to 'closed' state and remove our protocol entry in the device list.
2622  */
2623
2624 static int packet_release(struct socket *sock)
2625 {
2626         struct sock *sk = sock->sk;
2627         struct packet_sock *po;
2628         struct net *net;
2629         union tpacket_req_u req_u;
2630
2631         if (!sk)
2632                 return 0;
2633
2634         net = sock_net(sk);
2635         po = pkt_sk(sk);
2636
2637         mutex_lock(&net->packet.sklist_lock);
2638         sk_del_node_init_rcu(sk);
2639         mutex_unlock(&net->packet.sklist_lock);
2640
2641         preempt_disable();
2642         sock_prot_inuse_add(net, sk->sk_prot, -1);
2643         preempt_enable();
2644
2645         spin_lock(&po->bind_lock);
2646         unregister_prot_hook(sk, false);
2647         packet_cached_dev_reset(po);
2648
2649         if (po->prot_hook.dev) {
2650                 dev_put(po->prot_hook.dev);
2651                 po->prot_hook.dev = NULL;
2652         }
2653         spin_unlock(&po->bind_lock);
2654
2655         packet_flush_mclist(sk);
2656
2657         if (po->rx_ring.pg_vec) {
2658                 memset(&req_u, 0, sizeof(req_u));
2659                 packet_set_ring(sk, &req_u, 1, 0);
2660         }
2661
2662         if (po->tx_ring.pg_vec) {
2663                 memset(&req_u, 0, sizeof(req_u));
2664                 packet_set_ring(sk, &req_u, 1, 1);
2665         }
2666
2667         fanout_release(sk);
2668
2669         synchronize_net();
2670         /*
2671          *      Now the socket is dead. No more input will appear.
2672          */
2673         sock_orphan(sk);
2674         sock->sk = NULL;
2675
2676         /* Purge queues */
2677
2678         skb_queue_purge(&sk->sk_receive_queue);
2679         packet_free_pending(po);
2680         sk_refcnt_debug_release(sk);
2681
2682         sock_put(sk);
2683         return 0;
2684 }
2685
2686 /*
2687  *      Attach a packet hook.
2688  */
2689
2690 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2691 {
2692         struct packet_sock *po = pkt_sk(sk);
2693         struct net_device *dev_curr;
2694         __be16 proto_curr;
2695         bool need_rehook;
2696
2697         if (po->fanout) {
2698                 if (dev)
2699                         dev_put(dev);
2700
2701                 return -EINVAL;
2702         }
2703
2704         lock_sock(sk);
2705         spin_lock(&po->bind_lock);
2706
2707         proto_curr = po->prot_hook.type;
2708         dev_curr = po->prot_hook.dev;
2709
2710         need_rehook = proto_curr != proto || dev_curr != dev;
2711
2712         if (need_rehook) {
2713                 unregister_prot_hook(sk, true);
2714
2715                 po->num = proto;
2716                 po->prot_hook.type = proto;
2717                 po->prot_hook.dev = dev;
2718
2719                 po->ifindex = dev ? dev->ifindex : 0;
2720                 packet_cached_dev_assign(po, dev);
2721         }
2722         if (dev_curr)
2723                 dev_put(dev_curr);
2724
2725         if (proto == 0 || !need_rehook)
2726                 goto out_unlock;
2727
2728         if (!dev || (dev->flags & IFF_UP)) {
2729                 register_prot_hook(sk);
2730         } else {
2731                 sk->sk_err = ENETDOWN;
2732                 if (!sock_flag(sk, SOCK_DEAD))
2733                         sk->sk_error_report(sk);
2734         }
2735
2736 out_unlock:
2737         spin_unlock(&po->bind_lock);
2738         release_sock(sk);
2739         return 0;
2740 }
2741
2742 /*
2743  *      Bind a packet socket to a device
2744  */
2745
2746 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2747                             int addr_len)
2748 {
2749         struct sock *sk = sock->sk;
2750         char name[15];
2751         struct net_device *dev;
2752         int err = -ENODEV;
2753
2754         /*
2755          *      Check legality
2756          */
2757
2758         if (addr_len != sizeof(struct sockaddr))
2759                 return -EINVAL;
2760         strlcpy(name, uaddr->sa_data, sizeof(name));
2761
2762         dev = dev_get_by_name(sock_net(sk), name);
2763         if (dev)
2764                 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2765         return err;
2766 }
2767
2768 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2769 {
2770         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2771         struct sock *sk = sock->sk;
2772         struct net_device *dev = NULL;
2773         int err;
2774
2775
2776         /*
2777          *      Check legality
2778          */
2779
2780         if (addr_len < sizeof(struct sockaddr_ll))
2781                 return -EINVAL;
2782         if (sll->sll_family != AF_PACKET)
2783                 return -EINVAL;
2784
2785         if (sll->sll_ifindex) {
2786                 err = -ENODEV;
2787                 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2788                 if (dev == NULL)
2789                         goto out;
2790         }
2791         err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
2792
2793 out:
2794         return err;
2795 }
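
/*
 * Illustrative userspace sketch (not part of this file): binding an
 * AF_PACKET socket to a single interface, which lands in packet_bind()
 * and packet_do_bind() above. "eth0" is a placeholder.
 */
#if 0
        struct sockaddr_ll sll = {
                .sll_family   = AF_PACKET,
                .sll_protocol = htons(ETH_P_ALL),
                .sll_ifindex  = if_nametoindex("eth0"),
        };

        if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
                perror("bind");
#endif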
2796
2797 static struct proto packet_proto = {
2798         .name     = "PACKET",
2799         .owner    = THIS_MODULE,
2800         .obj_size = sizeof(struct packet_sock),
2801 };
2802
2803 /*
2804  *      Create a packet socket (SOCK_RAW, SOCK_DGRAM or the obsolete SOCK_PACKET).
2805  */
2806
2807 static int packet_create(struct net *net, struct socket *sock, int protocol,
2808                          int kern)
2809 {
2810         struct sock *sk;
2811         struct packet_sock *po;
2812         __be16 proto = (__force __be16)protocol; /* weird, but documented */
2813         int err;
2814
2815         if (!ns_capable(net->user_ns, CAP_NET_RAW))
2816                 return -EPERM;
2817         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2818             sock->type != SOCK_PACKET)
2819                 return -ESOCKTNOSUPPORT;
2820
2821         sock->state = SS_UNCONNECTED;
2822
2823         err = -ENOBUFS;
2824         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2825         if (sk == NULL)
2826                 goto out;
2827
2828         sock->ops = &packet_ops;
2829         if (sock->type == SOCK_PACKET)
2830                 sock->ops = &packet_ops_spkt;
2831
2832         sock_init_data(sock, sk);
2833
2834         po = pkt_sk(sk);
2835         sk->sk_family = PF_PACKET;
2836         po->num = proto;
2837         po->xmit = dev_queue_xmit;
2838
2839         err = packet_alloc_pending(po);
2840         if (err)
2841                 goto out2;
2842
2843         packet_cached_dev_reset(po);
2844
2845         sk->sk_destruct = packet_sock_destruct;
2846         sk_refcnt_debug_inc(sk);
2847
2848         /*
2849          *      Attach a protocol block
2850          */
2851
2852         spin_lock_init(&po->bind_lock);
2853         mutex_init(&po->pg_vec_lock);
2854         po->prot_hook.func = packet_rcv;
2855
2856         if (sock->type == SOCK_PACKET)
2857                 po->prot_hook.func = packet_rcv_spkt;
2858
2859         po->prot_hook.af_packet_priv = sk;
2860
2861         if (proto) {
2862                 po->prot_hook.type = proto;
2863                 register_prot_hook(sk);
2864         }
2865
2866         mutex_lock(&net->packet.sklist_lock);
2867         sk_add_node_rcu(sk, &net->packet.sklist);
2868         mutex_unlock(&net->packet.sklist_lock);
2869
2870         preempt_disable();
2871         sock_prot_inuse_add(net, &packet_proto, 1);
2872         preempt_enable();
2873
2874         return 0;
2875 out2:
2876         sk_free(sk);
2877 out:
2878         return err;
2879 }
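
/*
 * Illustrative userspace sketch (not part of this file): the three
 * socket types accepted by packet_create() above. CAP_NET_RAW is
 * required in the socket's user namespace, and the protocol is given
 * in network byte order.
 */
#if 0
        int raw  = socket(AF_PACKET, SOCK_RAW,    htons(ETH_P_ALL)); /* ll header included */
        int dgrm = socket(AF_PACKET, SOCK_DGRAM,  htons(ETH_P_IP));  /* ll header stripped */
        int spkt = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL)); /* obsolete */
#endif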
2880
2881 /*
2882  *      Pull a packet from our receive queue and hand it to the user.
2883  *      If necessary we block.
2884  */
2885
2886 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2887                           int flags)
2888 {
2889         struct sock *sk = sock->sk;
2890         struct sk_buff *skb;
2891         int copied, err;
2892         int vnet_hdr_len = 0;
2893         unsigned int origlen = 0;
2894
2895         err = -EINVAL;
2896         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2897                 goto out;
2898
2899 #if 0
2900         /* What error should we return now? EUNATTACH? */
2901         if (pkt_sk(sk)->ifindex < 0)
2902                 return -ENODEV;
2903 #endif
2904
2905         if (flags & MSG_ERRQUEUE) {
2906                 err = sock_recv_errqueue(sk, msg, len,
2907                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
2908                 goto out;
2909         }
2910
2911         /*
2912          *      Call the generic datagram receiver. This handles all sorts
2913          *      of horrible races and re-entrancy so we can forget about it
2914          *      in the protocol layers.
2915          *
2916          *      Now it will return ENETDOWN if the device has just gone down,
2917          *      but then it will block.
2918          */
2919
2920         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2921
2922         /*
2923          *      If an error occurred, return it. Because skb_recv_datagram()
2924          *      handles the blocking, we don't need to see or worry about
2925          *      blocking retries.
2926          */
2927
2928         if (skb == NULL)
2929                 goto out;
2930
2931         if (pkt_sk(sk)->has_vnet_hdr) {
2932                 struct virtio_net_hdr vnet_hdr = { 0 };
2933
2934                 err = -EINVAL;
2935                 vnet_hdr_len = sizeof(vnet_hdr);
2936                 if (len < vnet_hdr_len)
2937                         goto out_free;
2938
2939                 len -= vnet_hdr_len;
2940
2941                 if (skb_is_gso(skb)) {
2942                         struct skb_shared_info *sinfo = skb_shinfo(skb);
2943
2944                         /* This is a hint as to how much should be linear. */
2945                         vnet_hdr.hdr_len =
2946                                 __cpu_to_virtio16(false, skb_headlen(skb));
2947                         vnet_hdr.gso_size =
2948                                 __cpu_to_virtio16(false, sinfo->gso_size);
2949                         if (sinfo->gso_type & SKB_GSO_TCPV4)
2950                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2951                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
2952                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2953                         else if (sinfo->gso_type & SKB_GSO_UDP)
2954                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2955                         else if (sinfo->gso_type & SKB_GSO_FCOE)
2956                                 goto out_free;
2957                         else
2958                                 BUG();
2959                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2960                                 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2961                 } else
2962                         vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2963
2964                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2965                         vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2966                         vnet_hdr.csum_start = __cpu_to_virtio16(false,
2967                                           skb_checksum_start_offset(skb));
2968                         vnet_hdr.csum_offset = __cpu_to_virtio16(false,
2969                                                          skb->csum_offset);
2970                 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2971                         vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2972                 } /* else everything is zero */
2973
2974                 err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
2975                 if (err < 0)
2976                         goto out_free;
2977         }
2978
2979         /* You lose any data beyond the buffer you gave. If that worries
2980          * a user program, it can ask the device for its MTU
2981          * anyway.
2982          */
2983         copied = skb->len;
2984         if (copied > len) {
2985                 copied = len;
2986                 msg->msg_flags |= MSG_TRUNC;
2987         }
2988
2989         err = skb_copy_datagram_msg(skb, 0, msg, copied);
2990         if (err)
2991                 goto out_free;
2992
2993         if (sock->type != SOCK_PACKET) {
2994                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2995
2996                 /* Original length was stored in sockaddr_ll fields */
2997                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
2998                 sll->sll_family = AF_PACKET;
2999                 sll->sll_protocol = skb->protocol;
3000         }
3001
3002         sock_recv_ts_and_drops(msg, sk, skb);
3003
3004         if (msg->msg_name) {
3005                 /* If the address length field is there to be filled
3006                  * in, we fill it in now.
3007                  */
3008                 if (sock->type == SOCK_PACKET) {
3009                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3010                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3011                 } else {
3012                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3013
3014                         msg->msg_namelen = sll->sll_halen +
3015                                 offsetof(struct sockaddr_ll, sll_addr);
3016                 }
3017                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3018                        msg->msg_namelen);
3019         }
3020
3021         if (pkt_sk(sk)->auxdata) {
3022                 struct tpacket_auxdata aux;
3023
3024                 aux.tp_status = TP_STATUS_USER;
3025                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3026                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3027                 else if (skb->pkt_type != PACKET_OUTGOING &&
3028                          (skb->ip_summed == CHECKSUM_COMPLETE ||
3029                           skb_csum_unnecessary(skb)))
3030                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3031
3032                 aux.tp_len = origlen;
3033                 aux.tp_snaplen = skb->len;
3034                 aux.tp_mac = 0;
3035                 aux.tp_net = skb_network_offset(skb);
3036                 if (skb_vlan_tag_present(skb)) {
3037                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3038                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3039                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3040                 } else {
3041                         aux.tp_vlan_tci = 0;
3042                         aux.tp_vlan_tpid = 0;
3043                 }
3044                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3045         }
3046
3047         /*
3048          *      Free or return the buffer as appropriate. Again this
3049          *      hides all the races and re-entrancy issues from us.
3050          */
3051         err = vnet_hdr_len + ((flags & MSG_TRUNC) ? skb->len : copied);
3052
3053 out_free:
3054         skb_free_datagram(sk, skb);
3055 out:
3056         return err;
3057 }
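/*
 * Illustrative userspace sketch (not part of this file): once
 * PACKET_AUXDATA is enabled, the control message built above can be
 * read back as follows.  The descriptor "fd" and the buffer sizes are
 * hypothetical.
 *
 *	struct tpacket_auxdata *aux = NULL;
 *	struct cmsghdr *cmsg;
 *	char pkt[2048], ctrl[CMSG_SPACE(sizeof(*aux))];
 *	struct iovec iov = { .iov_base = pkt, .iov_len = sizeof(pkt) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *	};
 *
 *	if (recvmsg(fd, &msg, 0) >= 0)
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_PACKET &&
 *			    cmsg->cmsg_type == PACKET_AUXDATA)
 *				aux = (void *)CMSG_DATA(cmsg);
 *
 * aux->tp_len then carries the original wire length and aux->tp_snaplen
 * the captured length, exactly as filled in above.
 */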
3058
3059 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3060                                int *uaddr_len, int peer)
3061 {
3062         struct net_device *dev;
3063         struct sock *sk = sock->sk;
3064
3065         if (peer)
3066                 return -EOPNOTSUPP;
3067
3068         uaddr->sa_family = AF_PACKET;
3069         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3070         rcu_read_lock();
3071         dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3072         if (dev)
3073                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3074         rcu_read_unlock();
3075         *uaddr_len = sizeof(*uaddr);
3076
3077         return 0;
3078 }
3079
3080 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3081                           int *uaddr_len, int peer)
3082 {
3083         struct net_device *dev;
3084         struct sock *sk = sock->sk;
3085         struct packet_sock *po = pkt_sk(sk);
3086         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3087
3088         if (peer)
3089                 return -EOPNOTSUPP;
3090
3091         sll->sll_family = AF_PACKET;
3092         sll->sll_ifindex = po->ifindex;
3093         sll->sll_protocol = po->num;
3094         sll->sll_pkttype = 0;
3095         rcu_read_lock();
3096         dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3097         if (dev) {
3098                 sll->sll_hatype = dev->type;
3099                 sll->sll_halen = dev->addr_len;
3100                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3101         } else {
3102                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3103                 sll->sll_halen = 0;
3104         }
3105         rcu_read_unlock();
3106         *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3107
3108         return 0;
3109 }
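/*
 * Illustrative userspace sketch (hypothetical "fd"): getsockname() on a
 * bound packet socket lands in packet_getname() above.
 *
 *	struct sockaddr_ll sll;
 *	socklen_t len = sizeof(sll);
 *
 *	if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
 *		printf("ifindex %d hatype %hu halen %u\n",
 *		       sll.sll_ifindex, sll.sll_hatype, sll.sll_halen);
 */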
3110
3111 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3112                          int what)
3113 {
3114         switch (i->type) {
3115         case PACKET_MR_MULTICAST:
3116                 if (i->alen != dev->addr_len)
3117                         return -EINVAL;
3118                 if (what > 0)
3119                         return dev_mc_add(dev, i->addr);
3120                 else
3121                         return dev_mc_del(dev, i->addr);
3122                 break;
3123         case PACKET_MR_PROMISC:
3124                 return dev_set_promiscuity(dev, what);
3125         case PACKET_MR_ALLMULTI:
3126                 return dev_set_allmulti(dev, what);
3127         case PACKET_MR_UNICAST:
3128                 if (i->alen != dev->addr_len)
3129                         return -EINVAL;
3130                 if (what > 0)
3131                         return dev_uc_add(dev, i->addr);
3132                 else
3133                         return dev_uc_del(dev, i->addr);
3134                 break;
3135         default:
3136                 break;
3137         }
3138         return 0;
3139 }
3140
3141 static void packet_dev_mclist_delete(struct net_device *dev,
3142                                      struct packet_mclist **mlp)
3143 {
3144         struct packet_mclist *ml;
3145
3146         while ((ml = *mlp) != NULL) {
3147                 if (ml->ifindex == dev->ifindex) {
3148                         packet_dev_mc(dev, ml, -1);
3149                         *mlp = ml->next;
3150                         kfree(ml);
3151                 } else
3152                         mlp = &ml->next;
3153         }
3154 }
3155
3156 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3157 {
3158         struct packet_sock *po = pkt_sk(sk);
3159         struct packet_mclist *ml, *i;
3160         struct net_device *dev;
3161         int err;
3162
3163         rtnl_lock();
3164
3165         err = -ENODEV;
3166         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3167         if (!dev)
3168                 goto done;
3169
3170         err = -EINVAL;
3171         if (mreq->mr_alen > dev->addr_len)
3172                 goto done;
3173
3174         err = -ENOBUFS;
3175         i = kmalloc(sizeof(*i), GFP_KERNEL);
3176         if (i == NULL)
3177                 goto done;
3178
3179         err = 0;
3180         for (ml = po->mclist; ml; ml = ml->next) {
3181                 if (ml->ifindex == mreq->mr_ifindex &&
3182                     ml->type == mreq->mr_type &&
3183                     ml->alen == mreq->mr_alen &&
3184                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3185                         ml->count++;
3186                         /* Free the new element ... */
3187                         kfree(i);
3188                         goto done;
3189                 }
3190         }
3191
3192         i->type = mreq->mr_type;
3193         i->ifindex = mreq->mr_ifindex;
3194         i->alen = mreq->mr_alen;
3195         memcpy(i->addr, mreq->mr_address, i->alen);
3196         i->count = 1;
3197         i->next = po->mclist;
3198         po->mclist = i;
3199         err = packet_dev_mc(dev, i, 1);
3200         if (err) {
3201                 po->mclist = i->next;
3202                 kfree(i);
3203         }
3204
3205 done:
3206         rtnl_unlock();
3207         return err;
3208 }
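/*
 * Illustrative userspace sketch: packet_mc_add() is reached via
 * setsockopt(PACKET_ADD_MEMBERSHIP).  Enabling promiscuous mode on a
 * hypothetical interface index "ifindex", for example:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 *
 * The reference counting above makes a matching PACKET_DROP_MEMBERSHIP
 * undo the change only when the last user is gone.
 */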
3209
3210 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3211 {
3212         struct packet_mclist *ml, **mlp;
3213
3214         rtnl_lock();
3215
3216         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3217                 if (ml->ifindex == mreq->mr_ifindex &&
3218                     ml->type == mreq->mr_type &&
3219                     ml->alen == mreq->mr_alen &&
3220                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3221                         if (--ml->count == 0) {
3222                                 struct net_device *dev;
3223                                 *mlp = ml->next;
3224                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3225                                 if (dev)
3226                                         packet_dev_mc(dev, ml, -1);
3227                                 kfree(ml);
3228                         }
3229                         break;
3230                 }
3231         }
3232         rtnl_unlock();
3233         return 0;
3234 }
3235
3236 static void packet_flush_mclist(struct sock *sk)
3237 {
3238         struct packet_sock *po = pkt_sk(sk);
3239         struct packet_mclist *ml;
3240
3241         if (!po->mclist)
3242                 return;
3243
3244         rtnl_lock();
3245         while ((ml = po->mclist) != NULL) {
3246                 struct net_device *dev;
3247
3248                 po->mclist = ml->next;
3249                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3250                 if (dev != NULL)
3251                         packet_dev_mc(dev, ml, -1);
3252                 kfree(ml);
3253         }
3254         rtnl_unlock();
3255 }
3256
3257 static int
3258 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3259 {
3260         struct sock *sk = sock->sk;
3261         struct packet_sock *po = pkt_sk(sk);
3262         int ret;
3263
3264         if (level != SOL_PACKET)
3265                 return -ENOPROTOOPT;
3266
3267         switch (optname) {
3268         case PACKET_ADD_MEMBERSHIP:
3269         case PACKET_DROP_MEMBERSHIP:
3270         {
3271                 struct packet_mreq_max mreq;
3272                 int len = optlen;
3273                 memset(&mreq, 0, sizeof(mreq));
3274                 if (len < sizeof(struct packet_mreq))
3275                         return -EINVAL;
3276                 if (len > sizeof(mreq))
3277                         len = sizeof(mreq);
3278                 if (copy_from_user(&mreq, optval, len))
3279                         return -EFAULT;
3280                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3281                         return -EINVAL;
3282                 if (optname == PACKET_ADD_MEMBERSHIP)
3283                         ret = packet_mc_add(sk, &mreq);
3284                 else
3285                         ret = packet_mc_drop(sk, &mreq);
3286                 return ret;
3287         }
3288
3289         case PACKET_RX_RING:
3290         case PACKET_TX_RING:
3291         {
3292                 union tpacket_req_u req_u;
3293                 int len;
3294
3295                 switch (po->tp_version) {
3296                 case TPACKET_V1:
3297                 case TPACKET_V2:
3298                         len = sizeof(req_u.req);
3299                         break;
3300                 case TPACKET_V3:
3301                 default:
3302                         len = sizeof(req_u.req3);
3303                         break;
3304                 }
3305                 if (optlen < len)
3306                         return -EINVAL;
3307                 if (pkt_sk(sk)->has_vnet_hdr)
3308                         return -EINVAL;
3309                 if (copy_from_user(&req_u.req, optval, len))
3310                         return -EFAULT;
3311                 return packet_set_ring(sk, &req_u, 0,
3312                         optname == PACKET_TX_RING);
3313         }
3314         case PACKET_COPY_THRESH:
3315         {
3316                 int val;
3317
3318                 if (optlen != sizeof(val))
3319                         return -EINVAL;
3320                 if (copy_from_user(&val, optval, sizeof(val)))
3321                         return -EFAULT;
3322
3323                 pkt_sk(sk)->copy_thresh = val;
3324                 return 0;
3325         }
3326         case PACKET_VERSION:
3327         {
3328                 int val;
3329
3330                 if (optlen != sizeof(val))
3331                         return -EINVAL;
3332                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3333                         return -EBUSY;
3334                 if (copy_from_user(&val, optval, sizeof(val)))
3335                         return -EFAULT;
3336                 switch (val) {
3337                 case TPACKET_V1:
3338                 case TPACKET_V2:
3339                 case TPACKET_V3:
3340                         po->tp_version = val;
3341                         return 0;
3342                 default:
3343                         return -EINVAL;
3344                 }
3345         }
3346         case PACKET_RESERVE:
3347         {
3348                 unsigned int val;
3349
3350                 if (optlen != sizeof(val))
3351                         return -EINVAL;
3352                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3353                         return -EBUSY;
3354                 if (copy_from_user(&val, optval, sizeof(val)))
3355                         return -EFAULT;
3356                 po->tp_reserve = val;
3357                 return 0;
3358         }
3359         case PACKET_LOSS:
3360         {
3361                 unsigned int val;
3362
3363                 if (optlen != sizeof(val))
3364                         return -EINVAL;
3365                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3366                         return -EBUSY;
3367                 if (copy_from_user(&val, optval, sizeof(val)))
3368                         return -EFAULT;
3369                 po->tp_loss = !!val;
3370                 return 0;
3371         }
3372         case PACKET_AUXDATA:
3373         {
3374                 int val;
3375
3376                 if (optlen < sizeof(val))
3377                         return -EINVAL;
3378                 if (copy_from_user(&val, optval, sizeof(val)))
3379                         return -EFAULT;
3380
3381                 po->auxdata = !!val;
3382                 return 0;
3383         }
3384         case PACKET_ORIGDEV:
3385         {
3386                 int val;
3387
3388                 if (optlen < sizeof(val))
3389                         return -EINVAL;
3390                 if (copy_from_user(&val, optval, sizeof(val)))
3391                         return -EFAULT;
3392
3393                 po->origdev = !!val;
3394                 return 0;
3395         }
3396         case PACKET_VNET_HDR:
3397         {
3398                 int val;
3399
3400                 if (sock->type != SOCK_RAW)
3401                         return -EINVAL;
3402                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3403                         return -EBUSY;
3404                 if (optlen < sizeof(val))
3405                         return -EINVAL;
3406                 if (copy_from_user(&val, optval, sizeof(val)))
3407                         return -EFAULT;
3408
3409                 po->has_vnet_hdr = !!val;
3410                 return 0;
3411         }
3412         case PACKET_TIMESTAMP:
3413         {
3414                 int val;
3415
3416                 if (optlen != sizeof(val))
3417                         return -EINVAL;
3418                 if (copy_from_user(&val, optval, sizeof(val)))
3419                         return -EFAULT;
3420
3421                 po->tp_tstamp = val;
3422                 return 0;
3423         }
3424         case PACKET_FANOUT:
3425         {
3426                 int val;
3427
3428                 if (optlen != sizeof(val))
3429                         return -EINVAL;
3430                 if (copy_from_user(&val, optval, sizeof(val)))
3431                         return -EFAULT;
3432
3433                 return fanout_add(sk, val & 0xffff, val >> 16);
3434         }
3435         case PACKET_TX_HAS_OFF:
3436         {
3437                 unsigned int val;
3438
3439                 if (optlen != sizeof(val))
3440                         return -EINVAL;
3441                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3442                         return -EBUSY;
3443                 if (copy_from_user(&val, optval, sizeof(val)))
3444                         return -EFAULT;
3445                 po->tp_tx_has_off = !!val;
3446                 return 0;
3447         }
3448         case PACKET_QDISC_BYPASS:
3449         {
3450                 int val;
3451
3452                 if (optlen != sizeof(val))
3453                         return -EINVAL;
3454                 if (copy_from_user(&val, optval, sizeof(val)))
3455                         return -EFAULT;
3456
3457                 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3458                 return 0;
3459         }
3460         default:
3461                 return -ENOPROTOOPT;
3462         }
3463 }
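/*
 * Illustrative userspace sketch of the ring-setup path above: pick a
 * TPACKET version first, then size the ring.  The geometry below (16
 * one-page blocks, 256-byte frames) is only an example; any request
 * must satisfy the checks in packet_set_ring().
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 16,
 *		.tp_frame_size = 256,
 *		.tp_frame_nr   = 16 * (4096 / 256),
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */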
3464
3465 static int packet_getsockopt(struct socket *sock, int level, int optname,
3466                              char __user *optval, int __user *optlen)
3467 {
3468         int len;
3469         int val, lv = sizeof(val);
3470         struct sock *sk = sock->sk;
3471         struct packet_sock *po = pkt_sk(sk);
3472         void *data = &val;
3473         union tpacket_stats_u st;
3474
3475         if (level != SOL_PACKET)
3476                 return -ENOPROTOOPT;
3477
3478         if (get_user(len, optlen))
3479                 return -EFAULT;
3480
3481         if (len < 0)
3482                 return -EINVAL;
3483
3484         switch (optname) {
3485         case PACKET_STATISTICS:
3486                 spin_lock_bh(&sk->sk_receive_queue.lock);
3487                 memcpy(&st, &po->stats, sizeof(st));
3488                 memset(&po->stats, 0, sizeof(po->stats));
3489                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3490
3491                 if (po->tp_version == TPACKET_V3) {
3492                         lv = sizeof(struct tpacket_stats_v3);
3493                         st.stats3.tp_packets += st.stats3.tp_drops;
3494                         data = &st.stats3;
3495                 } else {
3496                         lv = sizeof(struct tpacket_stats);
3497                         st.stats1.tp_packets += st.stats1.tp_drops;
3498                         data = &st.stats1;
3499                 }
3500
3501                 break;
3502         case PACKET_AUXDATA:
3503                 val = po->auxdata;
3504                 break;
3505         case PACKET_ORIGDEV:
3506                 val = po->origdev;
3507                 break;
3508         case PACKET_VNET_HDR:
3509                 val = po->has_vnet_hdr;
3510                 break;
3511         case PACKET_VERSION:
3512                 val = po->tp_version;
3513                 break;
3514         case PACKET_HDRLEN:
3515                 if (len > sizeof(int))
3516                         len = sizeof(int);
3517                 if (copy_from_user(&val, optval, len))
3518                         return -EFAULT;
3519                 switch (val) {
3520                 case TPACKET_V1:
3521                         val = sizeof(struct tpacket_hdr);
3522                         break;
3523                 case TPACKET_V2:
3524                         val = sizeof(struct tpacket2_hdr);
3525                         break;
3526                 case TPACKET_V3:
3527                         val = sizeof(struct tpacket3_hdr);
3528                         break;
3529                 default:
3530                         return -EINVAL;
3531                 }
3532                 break;
3533         case PACKET_RESERVE:
3534                 val = po->tp_reserve;
3535                 break;
3536         case PACKET_LOSS:
3537                 val = po->tp_loss;
3538                 break;
3539         case PACKET_TIMESTAMP:
3540                 val = po->tp_tstamp;
3541                 break;
3542         case PACKET_FANOUT:
3543                 val = (po->fanout ?
3544                        ((u32)po->fanout->id |
3545                         ((u32)po->fanout->type << 16) |
3546                         ((u32)po->fanout->flags << 24)) :
3547                        0);
3548                 break;
3549         case PACKET_TX_HAS_OFF:
3550                 val = po->tp_tx_has_off;
3551                 break;
3552         case PACKET_QDISC_BYPASS:
3553                 val = packet_use_direct_xmit(po);
3554                 break;
3555         default:
3556                 return -ENOPROTOOPT;
3557         }
3558
3559         if (len > lv)
3560                 len = lv;
3561         if (put_user(len, optlen))
3562                 return -EFAULT;
3563         if (copy_to_user(optval, data, len))
3564                 return -EFAULT;
3565         return 0;
3566 }
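/*
 * Illustrative userspace sketch: PACKET_STATISTICS is read-and-reset,
 * so each call reports the counters accumulated since the previous one
 * (with tp_packets already including tp_drops, as computed above).
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("%u packets, %u dropped\n",
 *		       st.tp_packets, st.tp_drops);
 */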
3567
3568
3569 static int packet_notifier(struct notifier_block *this,
3570                            unsigned long msg, void *ptr)
3571 {
3572         struct sock *sk;
3573         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3574         struct net *net = dev_net(dev);
3575
3576         rcu_read_lock();
3577         sk_for_each_rcu(sk, &net->packet.sklist) {
3578                 struct packet_sock *po = pkt_sk(sk);
3579
3580                 switch (msg) {
3581                 case NETDEV_UNREGISTER:
3582                         if (po->mclist)
3583                                 packet_dev_mclist_delete(dev, &po->mclist);
3584                         /* fallthrough */
3585
3586                 case NETDEV_DOWN:
3587                         if (dev->ifindex == po->ifindex) {
3588                                 spin_lock(&po->bind_lock);
3589                                 if (po->running) {
3590                                         __unregister_prot_hook(sk, false);
3591                                         sk->sk_err = ENETDOWN;
3592                                         if (!sock_flag(sk, SOCK_DEAD))
3593                                                 sk->sk_error_report(sk);
3594                                 }
3595                                 if (msg == NETDEV_UNREGISTER) {
3596                                         packet_cached_dev_reset(po);
3597                                         po->ifindex = -1;
3598                                         if (po->prot_hook.dev)
3599                                                 dev_put(po->prot_hook.dev);
3600                                         po->prot_hook.dev = NULL;
3601                                 }
3602                                 spin_unlock(&po->bind_lock);
3603                         }
3604                         break;
3605                 case NETDEV_UP:
3606                         if (dev->ifindex == po->ifindex) {
3607                                 spin_lock(&po->bind_lock);
3608                                 if (po->num)
3609                                         register_prot_hook(sk);
3610                                 spin_unlock(&po->bind_lock);
3611                         }
3612                         break;
3613                 }
3614         }
3615         rcu_read_unlock();
3616         return NOTIFY_DONE;
3617 }
3618
3619
3620 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3621                         unsigned long arg)
3622 {
3623         struct sock *sk = sock->sk;
3624
3625         switch (cmd) {
3626         case SIOCOUTQ:
3627         {
3628                 int amount = sk_wmem_alloc_get(sk);
3629
3630                 return put_user(amount, (int __user *)arg);
3631         }
3632         case SIOCINQ:
3633         {
3634                 struct sk_buff *skb;
3635                 int amount = 0;
3636
3637                 spin_lock_bh(&sk->sk_receive_queue.lock);
3638                 skb = skb_peek(&sk->sk_receive_queue);
3639                 if (skb)
3640                         amount = skb->len;
3641                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3642                 return put_user(amount, (int __user *)arg);
3643         }
3644         case SIOCGSTAMP:
3645                 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3646         case SIOCGSTAMPNS:
3647                 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3648
3649 #ifdef CONFIG_INET
3650         case SIOCADDRT:
3651         case SIOCDELRT:
3652         case SIOCDARP:
3653         case SIOCGARP:
3654         case SIOCSARP:
3655         case SIOCGIFADDR:
3656         case SIOCSIFADDR:
3657         case SIOCGIFBRDADDR:
3658         case SIOCSIFBRDADDR:
3659         case SIOCGIFNETMASK:
3660         case SIOCSIFNETMASK:
3661         case SIOCGIFDSTADDR:
3662         case SIOCSIFDSTADDR:
3663         case SIOCSIFFLAGS:
3664                 return inet_dgram_ops.ioctl(sock, cmd, arg);
3665 #endif
3666
3667         default:
3668                 return -ENOIOCTLCMD;
3669         }
3670         return 0;
3671 }
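/*
 * Illustrative userspace sketch: on packet sockets SIOCINQ reports the
 * length of the next queued packet rather than the whole backlog, as
 * implemented above.
 *
 *	int next_len = 0;
 *
 *	ioctl(fd, SIOCINQ, &next_len);
 *	// next_len is the size of the next packet, 0 if none is queued
 */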
3672
3673 static unsigned int packet_poll(struct file *file, struct socket *sock,
3674                                 poll_table *wait)
3675 {
3676         struct sock *sk = sock->sk;
3677         struct packet_sock *po = pkt_sk(sk);
3678         unsigned int mask = datagram_poll(file, sock, wait);
3679
3680         spin_lock_bh(&sk->sk_receive_queue.lock);
3681         if (po->rx_ring.pg_vec) {
3682                 if (!packet_previous_rx_frame(po, &po->rx_ring,
3683                         TP_STATUS_KERNEL))
3684                         mask |= POLLIN | POLLRDNORM;
3685         }
3686         spin_unlock_bh(&sk->sk_receive_queue.lock);
3687         spin_lock_bh(&sk->sk_write_queue.lock);
3688         if (po->tx_ring.pg_vec) {
3689                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3690                         mask |= POLLOUT | POLLWRNORM;
3691         }
3692         spin_unlock_bh(&sk->sk_write_queue.lock);
3693         return mask;
3694 }
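/*
 * Illustrative userspace sketch: with an mmap()ed RX ring, poll()
 * (serviced by packet_poll() above) is only needed while the frame at
 * the reader's position still belongs to the kernel; consuming frames
 * needs no syscall.  "frame" is a hypothetical pointer into the mapped
 * ring, here assuming TPACKET_V2.
 *
 *	struct tpacket2_hdr *hdr = frame;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	// ... process the frame, then hand it back to the kernel:
 *	hdr->tp_status = TP_STATUS_KERNEL;
 */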
3695
3696
3697 /* Dirty? Well, I still have not learned a better way to account
3698  * for user mmaps.
3699  */
3700
3701 static void packet_mm_open(struct vm_area_struct *vma)
3702 {
3703         struct file *file = vma->vm_file;
3704         struct socket *sock = file->private_data;
3705         struct sock *sk = sock->sk;
3706
3707         if (sk)
3708                 atomic_inc(&pkt_sk(sk)->mapped);
3709 }
3710
3711 static void packet_mm_close(struct vm_area_struct *vma)
3712 {
3713         struct file *file = vma->vm_file;
3714         struct socket *sock = file->private_data;
3715         struct sock *sk = sock->sk;
3716
3717         if (sk)
3718                 atomic_dec(&pkt_sk(sk)->mapped);
3719 }
3720
3721 static const struct vm_operations_struct packet_mmap_ops = {
3722         .open   =       packet_mm_open,
3723         .close  =       packet_mm_close,
3724 };
3725
3726 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3727                         unsigned int len)
3728 {
3729         int i;
3730
3731         for (i = 0; i < len; i++) {
3732                 if (likely(pg_vec[i].buffer)) {
3733                         if (is_vmalloc_addr(pg_vec[i].buffer))
3734                                 vfree(pg_vec[i].buffer);
3735                         else
3736                                 free_pages((unsigned long)pg_vec[i].buffer,
3737                                            order);
3738                         pg_vec[i].buffer = NULL;
3739                 }
3740         }
3741         kfree(pg_vec);
3742 }
3743
3744 static char *alloc_one_pg_vec_page(unsigned long order)
3745 {
3746         char *buffer;
3747         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3748                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3749
3750         buffer = (char *) __get_free_pages(gfp_flags, order);
3751         if (buffer)
3752                 return buffer;
3753
3754         /* __get_free_pages failed, fall back to vmalloc */
3755         buffer = vzalloc((1 << order) * PAGE_SIZE);
3756         if (buffer)
3757                 return buffer;
3758
3759         /* vmalloc failed too, let's dig into swap here */
3760         gfp_flags &= ~__GFP_NORETRY;
3761         buffer = (char *) __get_free_pages(gfp_flags, order);
3762         if (buffer)
3763                 return buffer;
3764
3765         /* complete and utter failure */
3766         return NULL;
3767 }
3768
3769 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3770 {
3771         unsigned int block_nr = req->tp_block_nr;
3772         struct pgv *pg_vec;
3773         int i;
3774
3775         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3776         if (unlikely(!pg_vec))
3777                 goto out;
3778
3779         for (i = 0; i < block_nr; i++) {
3780                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3781                 if (unlikely(!pg_vec[i].buffer))
3782                         goto out_free_pgvec;
3783         }
3784
3785 out:
3786         return pg_vec;
3787
3788 out_free_pgvec:
3789         free_pg_vec(pg_vec, order, block_nr);
3790         pg_vec = NULL;
3791         goto out;
3792 }
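/*
 * Worked example for the two allocators above (assuming 4 KiB pages):
 * a tp_block_size of 64 KiB gives order = get_order(65536) = 4, so each
 * pg_vec entry is either sixteen physically contiguous pages from
 * __get_free_pages() or, failing that, one 64 KiB vmalloc area.
 * free_pg_vec() tells the two cases apart again via is_vmalloc_addr().
 */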
3793
3794 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3795                 int closing, int tx_ring)
3796 {
3797         struct pgv *pg_vec = NULL;
3798         struct packet_sock *po = pkt_sk(sk);
3799         int was_running, order = 0;
3800         struct packet_ring_buffer *rb;
3801         struct sk_buff_head *rb_queue;
3802         __be16 num;
3803         int err = -EINVAL;
3804         /* Alias kept to minimize code churn */
3805         struct tpacket_req *req = &req_u->req;
3806
3807         /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3808         if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3809                 WARN(1, "Tx-ring is not supported.\n");
3810                 goto out;
3811         }
3812
3813         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3814         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3815
3816         err = -EBUSY;
3817         if (!closing) {
3818                 if (atomic_read(&po->mapped))
3819                         goto out;
3820                 if (packet_read_pending(rb))
3821                         goto out;
3822         }
3823
3824         if (req->tp_block_nr) {
3825                 /* Sanity tests and some calculations */
3826                 err = -EBUSY;
3827                 if (unlikely(rb->pg_vec))
3828                         goto out;
3829
3830                 switch (po->tp_version) {
3831                 case TPACKET_V1:
3832                         po->tp_hdrlen = TPACKET_HDRLEN;
3833                         break;
3834                 case TPACKET_V2:
3835                         po->tp_hdrlen = TPACKET2_HDRLEN;
3836                         break;
3837                 case TPACKET_V3:
3838                         po->tp_hdrlen = TPACKET3_HDRLEN;
3839                         break;
3840                 }
3841
3842                 err = -EINVAL;
3843                 if (unlikely((int)req->tp_block_size <= 0))
3844                         goto out;
3845                 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3846                         goto out;
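                /* Promote tp_sizeof_priv to u64 so an oversized value
                 * cannot wrap this block-size check.
                 */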
3847                 if (po->tp_version >= TPACKET_V3 &&
3848                     req->tp_block_size <=
3849                           BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
3850                         goto out;
3851                 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3852                                         po->tp_reserve))
3853                         goto out;
3854                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3855                         goto out;
3856
3857                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
3858                 if (unlikely(rb->frames_per_block <= 0))
3859                         goto out;
3860                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3861                                         req->tp_frame_nr))
3862                         goto out;
3863
3864                 err = -ENOMEM;
3865                 order = get_order(req->tp_block_size);
3866                 pg_vec = alloc_pg_vec(req, order);
3867                 if (unlikely(!pg_vec))
3868                         goto out;
3869                 switch (po->tp_version) {
3870                 case TPACKET_V3:
3871                 /* The transmit path is not supported for V3. We
3872                  * checked that above, but stay paranoid here.
3873                  */
3874                         if (!tx_ring)
3875                                 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3876                         break;
3877                 default:
3878                         break;
3879                 }
3880         } else {
3881                 /* tp_block_nr == 0: tear the existing ring down */
3883                 err = -EINVAL;
3884                 if (unlikely(req->tp_frame_nr))
3885                         goto out;
3886         }
3887
3888         lock_sock(sk);
3889
3890         /* Detach socket from network */
3891         spin_lock(&po->bind_lock);
3892         was_running = po->running;
3893         num = po->num;
3894         if (was_running) {
3895                 po->num = 0;
3896                 __unregister_prot_hook(sk, false);
3897         }
3898         spin_unlock(&po->bind_lock);
3899
3900         synchronize_net();
3901
3902         err = -EBUSY;
3903         mutex_lock(&po->pg_vec_lock);
3904         if (closing || atomic_read(&po->mapped) == 0) {
3905                 err = 0;
3906                 spin_lock_bh(&rb_queue->lock);
3907                 swap(rb->pg_vec, pg_vec);
3908                 rb->frame_max = (req->tp_frame_nr - 1);
3909                 rb->head = 0;
3910                 rb->frame_size = req->tp_frame_size;
3911                 spin_unlock_bh(&rb_queue->lock);
3912
3913                 swap(rb->pg_vec_order, order);
3914                 swap(rb->pg_vec_len, req->tp_block_nr);
3915
3916                 rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
3917                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3918                                                 tpacket_rcv : packet_rcv;
3919                 skb_queue_purge(rb_queue);
3920                 if (atomic_read(&po->mapped))
3921                         pr_err("packet_mmap: vma is busy: %d\n",
3922                                atomic_read(&po->mapped));
3923         }
3924         mutex_unlock(&po->pg_vec_lock);
3925
3926         spin_lock(&po->bind_lock);
3927         if (was_running) {
3928                 po->num = num;
3929                 register_prot_hook(sk);
3930         }
3931         spin_unlock(&po->bind_lock);
3932         if (closing && (po->tp_version > TPACKET_V2)) {
3933                 /* V3 has no block-based Tx ring, so only Rx owns a retire timer */
3934                 if (!tx_ring)
3935                         prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3936         }
3937         release_sock(sk);
3938
3939         if (pg_vec)
3940                 free_pg_vec(pg_vec, order, req->tp_block_nr);
3941 out:
3942         return err;
3943 }
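/*
 * Worked example for the sanity checks above: with tp_block_size = 8192,
 * tp_frame_size = 2048 and tp_block_nr = 4, frames_per_block is
 * 8192 / 2048 = 4, so the request is accepted only if tp_frame_nr is
 * exactly 4 * 4 = 16.  Tearing a ring down is the mirror image: a
 * request with tp_block_nr == 0 and tp_frame_nr == 0.
 */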
3944
3945 static int packet_mmap(struct file *file, struct socket *sock,
3946                 struct vm_area_struct *vma)
3947 {
3948         struct sock *sk = sock->sk;
3949         struct packet_sock *po = pkt_sk(sk);
3950         unsigned long size, expected_size;
3951         struct packet_ring_buffer *rb;
3952         unsigned long start;
3953         int err = -EINVAL;
3954         int i;
3955
3956         if (vma->vm_pgoff)
3957                 return -EINVAL;
3958
3959         mutex_lock(&po->pg_vec_lock);
3960
3961         expected_size = 0;
3962         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3963                 if (rb->pg_vec) {
3964                         expected_size += rb->pg_vec_len
3965                                                 * rb->pg_vec_pages
3966                                                 * PAGE_SIZE;
3967                 }
3968         }
3969
3970         if (expected_size == 0)
3971                 goto out;
3972
3973         size = vma->vm_end - vma->vm_start;
3974         if (size != expected_size)
3975                 goto out;
3976
3977         start = vma->vm_start;
3978         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3979                 if (rb->pg_vec == NULL)
3980                         continue;
3981
3982                 for (i = 0; i < rb->pg_vec_len; i++) {
3983                         struct page *page;
3984                         void *kaddr = rb->pg_vec[i].buffer;
3985                         int pg_num;
3986
3987                         for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3988                                 page = pgv_to_page(kaddr);
3989                                 err = vm_insert_page(vma, start, page);
3990                                 if (unlikely(err))
3991                                         goto out;
3992                                 start += PAGE_SIZE;
3993                                 kaddr += PAGE_SIZE;
3994                         }
3995                 }
3996         }
3997
3998         atomic_inc(&po->mapped);
3999         vma->vm_ops = &packet_mmap_ops;
4000         err = 0;
4001
4002 out:
4003         mutex_unlock(&po->pg_vec_lock);
4004         return err;
4005 }
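/*
 * Illustrative userspace sketch: the ring (RX followed by TX when both
 * are configured) is mapped with a single mmap() at offset 0 whose
 * length must equal the combined ring size checked above.  "req" is the
 * hypothetical tpacket_req used to create the ring.
 *
 *	size_t ring_len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, ring_len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */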
4006
4007 static const struct proto_ops packet_ops_spkt = {
4008         .family =       PF_PACKET,
4009         .owner =        THIS_MODULE,
4010         .release =      packet_release,
4011         .bind =         packet_bind_spkt,
4012         .connect =      sock_no_connect,
4013         .socketpair =   sock_no_socketpair,
4014         .accept =       sock_no_accept,
4015         .getname =      packet_getname_spkt,
4016         .poll =         datagram_poll,
4017         .ioctl =        packet_ioctl,
4018         .listen =       sock_no_listen,
4019         .shutdown =     sock_no_shutdown,
4020         .setsockopt =   sock_no_setsockopt,
4021         .getsockopt =   sock_no_getsockopt,
4022         .sendmsg =      packet_sendmsg_spkt,
4023         .recvmsg =      packet_recvmsg,
4024         .mmap =         sock_no_mmap,
4025         .sendpage =     sock_no_sendpage,
4026 };
4027
4028 static const struct proto_ops packet_ops = {
4029         .family =       PF_PACKET,
4030         .owner =        THIS_MODULE,
4031         .release =      packet_release,
4032         .bind =         packet_bind,
4033         .connect =      sock_no_connect,
4034         .socketpair =   sock_no_socketpair,
4035         .accept =       sock_no_accept,
4036         .getname =      packet_getname,
4037         .poll =         packet_poll,
4038         .ioctl =        packet_ioctl,
4039         .listen =       sock_no_listen,
4040         .shutdown =     sock_no_shutdown,
4041         .setsockopt =   packet_setsockopt,
4042         .getsockopt =   packet_getsockopt,
4043         .sendmsg =      packet_sendmsg,
4044         .recvmsg =      packet_recvmsg,
4045         .mmap =         packet_mmap,
4046         .sendpage =     sock_no_sendpage,
4047 };
4048
4049 static const struct net_proto_family packet_family_ops = {
4050         .family =       PF_PACKET,
4051         .create =       packet_create,
4052         .owner  =       THIS_MODULE,
4053 };
4054
4055 static struct notifier_block packet_netdev_notifier = {
4056         .notifier_call =        packet_notifier,
4057 };
4058
4059 #ifdef CONFIG_PROC_FS
4060
4061 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4062         __acquires(RCU)
4063 {
4064         struct net *net = seq_file_net(seq);
4065
4066         rcu_read_lock();
4067         return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4068 }
4069
4070 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4071 {
4072         struct net *net = seq_file_net(seq);
4073         return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4074 }
4075
4076 static void packet_seq_stop(struct seq_file *seq, void *v)
4077         __releases(RCU)
4078 {
4079         rcu_read_unlock();
4080 }
4081
4082 static int packet_seq_show(struct seq_file *seq, void *v)
4083 {
4084         if (v == SEQ_START_TOKEN)
4085                 seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4086         else {
4087                 struct sock *s = sk_entry(v);
4088                 const struct packet_sock *po = pkt_sk(s);
4089
4090                 seq_printf(seq,
4091                            "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4092                            s,
4093                            atomic_read(&s->sk_refcnt),
4094                            s->sk_type,
4095                            ntohs(po->num),
4096                            po->ifindex,
4097                            po->running,
4098                            atomic_read(&s->sk_rmem_alloc),
4099                            from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4100                            sock_i_ino(s));
4101         }
4102
4103         return 0;
4104 }
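/*
 * A resulting /proc/net/packet line looks like this (values are
 * hypothetical):
 *
 *	sk       RefCnt Type Proto  Iface R Rmem   User   Inode
 *	ffff8800352ab000 3      3    0003   2     1 0      0      9478
 */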
4105
4106 static const struct seq_operations packet_seq_ops = {
4107         .start  = packet_seq_start,
4108         .next   = packet_seq_next,
4109         .stop   = packet_seq_stop,
4110         .show   = packet_seq_show,
4111 };
4112
4113 static int packet_seq_open(struct inode *inode, struct file *file)
4114 {
4115         return seq_open_net(inode, file, &packet_seq_ops,
4116                             sizeof(struct seq_net_private));
4117 }
4118
4119 static const struct file_operations packet_seq_fops = {
4120         .owner          = THIS_MODULE,
4121         .open           = packet_seq_open,
4122         .read           = seq_read,
4123         .llseek         = seq_lseek,
4124         .release        = seq_release_net,
4125 };
4126
4127 #endif
4128
4129 static int __net_init packet_net_init(struct net *net)
4130 {
4131         mutex_init(&net->packet.sklist_lock);
4132         INIT_HLIST_HEAD(&net->packet.sklist);
4133
4134         if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4135                 return -ENOMEM;
4136
4137         return 0;
4138 }
4139
4140 static void __net_exit packet_net_exit(struct net *net)
4141 {
4142         remove_proc_entry("packet", net->proc_net);
4143 }
4144
4145 static struct pernet_operations packet_net_ops = {
4146         .init = packet_net_init,
4147         .exit = packet_net_exit,
4148 };
4149
4150
4151 static void __exit packet_exit(void)
4152 {
4153         unregister_netdevice_notifier(&packet_netdev_notifier);
4154         unregister_pernet_subsys(&packet_net_ops);
4155         sock_unregister(PF_PACKET);
4156         proto_unregister(&packet_proto);
4157 }
4158
4159 static int __init packet_init(void)
4160 {
4161         int rc = proto_register(&packet_proto, 0);
4162
4163         if (rc != 0)
4164                 goto out;
4165
4166         sock_register(&packet_family_ops);
4167         register_pernet_subsys(&packet_net_ops);
4168         register_netdevice_notifier(&packet_netdev_notifier);
4169 out:
4170         return rc;
4171 }
4172
4173 module_init(packet_init);
4174 module_exit(packet_exit);
4175 MODULE_LICENSE("GPL");
4176 MODULE_ALIAS_NETPROTO(PF_PACKET);