
TOMOYO Linux Cross Reference
Linux/net/packet/af_packet.c

  1 /*
  2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
  3  *              operating system.  INET is implemented using the  BSD Socket
  4  *              interface as the means of communication with the user level.
  5  *
  6  *              PACKET - implements raw packet sockets.
  7  *
  8  * Authors:     Ross Biro
  9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 11  *
 12  * Fixes:
 13  *              Alan Cox        :       verify_area() now used correctly
 14  *              Alan Cox        :       new skbuff lists, look ma no backlogs!
 15  *              Alan Cox        :       tidied skbuff lists.
 16  *              Alan Cox        :       Now uses generic datagram routines I
 17  *                                      added. Also fixed the peek/read crash
 18  *                                      from all old Linux datagram code.
 19  *              Alan Cox        :       Uses the improved datagram code.
 20  *              Alan Cox        :       Added NULL's for socket options.
 21  *              Alan Cox        :       Re-commented the code.
 22  *              Alan Cox        :       Use new kernel side addressing
 23  *              Rob Janssen     :       Correct MTU usage.
 24  *              Dave Platt      :       Counter leaks caused by incorrect
 25  *                                      interrupt locking and some slightly
 26  *                                      dubious gcc output. Can you read
 27  *                                      compiler: it said _VOLATILE_
 28  *      Richard Kooijman        :       Timestamp fixes.
 29  *              Alan Cox        :       New buffers. Use sk->mac.raw.
 30  *              Alan Cox        :       sendmsg/recvmsg support.
 31  *              Alan Cox        :       Protocol setting support
 32  *      Alexey Kuznetsov        :       Untied from IPv4 stack.
 33  *      Cyrus Durgin            :       Fixed kerneld for kmod.
 34  *      Michal Ostrowski        :       Module initialization cleanup.
 35  *         Ulises Alonso        :       Frame number limit removal and
 36  *                                      packet_set_ring memory leak.
 37  *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
 38  *                                      The convention is that longer addresses
 39  *                                      will simply extend the hardware address
 40  *                                      byte arrays at the end of sockaddr_ll
 41  *                                      and packet_mreq.
 42  *              Johann Baudy    :       Added TX RING.
 43  *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
 44  *                                      layer.
 45  *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
 46  *
 47  *
 48  *              This program is free software; you can redistribute it and/or
 49  *              modify it under the terms of the GNU General Public License
 50  *              as published by the Free Software Foundation; either version
 51  *              2 of the License, or (at your option) any later version.
 52  *
 53  */
 54 
 55 #include <linux/types.h>
 56 #include <linux/mm.h>
 57 #include <linux/capability.h>
 58 #include <linux/fcntl.h>
 59 #include <linux/socket.h>
 60 #include <linux/in.h>
 61 #include <linux/inet.h>
 62 #include <linux/netdevice.h>
 63 #include <linux/if_packet.h>
 64 #include <linux/wireless.h>
 65 #include <linux/kernel.h>
 66 #include <linux/kmod.h>
 67 #include <linux/slab.h>
 68 #include <linux/vmalloc.h>
 69 #include <net/net_namespace.h>
 70 #include <net/ip.h>
 71 #include <net/protocol.h>
 72 #include <linux/skbuff.h>
 73 #include <net/sock.h>
 74 #include <linux/errno.h>
 75 #include <linux/timer.h>
 76 #include <linux/uaccess.h>
 77 #include <asm/ioctls.h>
 78 #include <asm/page.h>
 79 #include <asm/cacheflush.h>
 80 #include <asm/io.h>
 81 #include <linux/proc_fs.h>
 82 #include <linux/seq_file.h>
 83 #include <linux/poll.h>
 84 #include <linux/module.h>
 85 #include <linux/init.h>
 86 #include <linux/mutex.h>
 87 #include <linux/if_vlan.h>
 88 #include <linux/virtio_net.h>
 89 #include <linux/errqueue.h>
 90 #include <linux/net_tstamp.h>
 91 #include <linux/percpu.h>
 92 #ifdef CONFIG_INET
 93 #include <net/inet_common.h>
 94 #endif
 95 #include <linux/bpf.h>
 96 #include <net/compat.h>
 97 
 98 #include "internal.h"
 99 
100 /*
101    Assumptions:
102    - if a device has no dev->hard_header routine, it adds and removes the
103      ll header itself. In this case the ll header is invisible outside
104      the device, but higher levels should still reserve
105      dev->hard_header_len.  Some devices are clever enough to reallocate
106      the skb when the header will not fit in the reserved space (tunnels);
107      others are silly (PPP).
108    - a packet socket receives packets with the ll header already pulled,
109      so SOCK_RAW should push it back.
110 
111 On receive:
112 -----------
113 
114 Incoming, dev->hard_header!=NULL
115    mac_header -> ll header
116    data       -> data
117 
118 Outgoing, dev->hard_header!=NULL
119    mac_header -> ll header
120    data       -> ll header
121 
122 Incoming, dev->hard_header==NULL
123    mac_header -> UNKNOWN position. It very likely points to the ll
124                  header.  PPP does this, which is wrong, because it
125                  introduces asymmetry between the rx and tx paths.
126    data       -> data
127 
128 Outgoing, dev->hard_header==NULL
129    mac_header -> data. ll header is still not built!
130    data       -> data
131 
132 Summary
133   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
134 
135 
136 On transmit:
137 ------------
138 
139 dev->hard_header != NULL
140    mac_header -> ll header
141    data       -> ll header
142 
143 dev->hard_header == NULL (ll header is added by device, we cannot control it)
144    mac_header -> data
145    data       -> data
146 
147    We should set nh.raw on output to the correct position;
148    the packet classifier depends on it.
149  */
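
/*
 * Illustration (not from this file): the rx/tx layouts above are what
 * user space sees through the choice of socket type -- SOCK_RAW delivers
 * frames with the ll header in the buffer, while SOCK_DGRAM delivers only
 * the payload and describes the header via sockaddr_ll.  A hypothetical
 * minimal SOCK_RAW receiver (requires CAP_NET_RAW):
 *
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		struct sockaddr_ll addr;
 *		socklen_t alen = sizeof(addr);
 *		unsigned char buf[2048];
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return 1;
 *		n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&addr, &alen);
 *		if (n >= ETH_HLEN)	// dst(6) + src(6) + proto(2)
 *			printf("ifindex %d proto 0x%04x\n",
 *			       addr.sll_ifindex, ntohs(addr.sll_protocol));
 *		close(fd);
 *		return 0;
 *	}
 */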
150 
151 /* Private packet socket structures. */
152 
153 /* identical to struct packet_mreq except it has
154  * a longer address field.
155  */
156 struct packet_mreq_max {
157         int             mr_ifindex;
158         unsigned short  mr_type;
159         unsigned short  mr_alen;
160         unsigned char   mr_address[MAX_ADDR_LEN];
161 };
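
/*
 * Usage sketch (illustrative): struct packet_mreq_max mirrors the
 * user-visible struct packet_mreq, just with room for longer hardware
 * addresses.  User space joins memberships through setsockopt(), e.g.
 * enabling promiscuous reception on one device:
 *
 *	#include <linux/if_packet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	// 'fd' is an AF_PACKET socket, 'ifindex' a valid device index
 *	static int enable_promisc(int fd, int ifindex)
 *	{
 *		struct packet_mreq mreq;
 *
 *		memset(&mreq, 0, sizeof(mreq));
 *		mreq.mr_ifindex = ifindex;
 *		mreq.mr_type = PACKET_MR_PROMISC;	// no address needed
 *		return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *				  &mreq, sizeof(mreq));
 *	}
 */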
162 
163 union tpacket_uhdr {
164         struct tpacket_hdr  *h1;
165         struct tpacket2_hdr *h2;
166         struct tpacket3_hdr *h3;
167         void *raw;
168 };
169 
170 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
171                 int closing, int tx_ring);
172 
173 #define V3_ALIGNMENT    (8)
174 
175 #define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
176 
177 #define BLK_PLUS_PRIV(sz_of_priv) \
178         (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
179 
180 #define PGV_FROM_VMALLOC 1
181 
182 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
183 #define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
184 #define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
185 #define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
186 #define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
187 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
188 #define BLOCK_PRIV(x)           ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
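
/*
 * Worked example (illustrative): with tp_sizeof_priv = 13, the private
 * area is padded to ALIGN(13, 8) = 16 bytes, so the first frame of a
 * block lands at BLK_HDR_LEN + 16.  The same arithmetic from user space:
 *
 *	#include <linux/if_packet.h>
 *	#include <stdio.h>
 *
 *	#define V3_ALIGN(x)	(((x) + 7UL) & ~7UL)	// ALIGN(x, 8)
 *
 *	int main(void)
 *	{
 *		unsigned long hdr = V3_ALIGN(sizeof(struct tpacket_block_desc));
 *		unsigned long priv = 13;	// example tp_sizeof_priv
 *
 *		printf("first pkt offset = %lu + %lu = %lu\n",
 *		       hdr, V3_ALIGN(priv), hdr + V3_ALIGN(priv));
 *		return 0;
 *	}
 */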
189 
190 struct packet_sock;
191 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
192                        struct packet_type *pt, struct net_device *orig_dev);
193 
194 static void *packet_previous_frame(struct packet_sock *po,
195                 struct packet_ring_buffer *rb,
196                 int status);
197 static void packet_increment_head(struct packet_ring_buffer *buff);
198 static int prb_curr_blk_in_use(struct tpacket_block_desc *);
199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
200                         struct packet_sock *);
201 static void prb_retire_current_block(struct tpacket_kbdq_core *,
202                 struct packet_sock *, unsigned int status);
203 static int prb_queue_frozen(struct tpacket_kbdq_core *);
204 static void prb_open_block(struct tpacket_kbdq_core *,
205                 struct tpacket_block_desc *);
206 static void prb_retire_rx_blk_timer_expired(unsigned long);
207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
208 static void prb_init_blk_timer(struct packet_sock *,
209                 struct tpacket_kbdq_core *,
210                 void (*func) (unsigned long));
211 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
212 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
213                 struct tpacket3_hdr *);
214 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
215                 struct tpacket3_hdr *);
216 static void packet_flush_mclist(struct sock *sk);
217 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);
218 
219 struct packet_skb_cb {
220         union {
221                 struct sockaddr_pkt pkt;
222                 union {
223                         /* Trick: alias skb original length with
224                          * ll.sll_family and ll.protocol in order
225                          * to save room.
226                          */
227                         unsigned int origlen;
228                         struct sockaddr_ll ll;
229                 };
230         } sa;
231 };
232 
233 #define vio_le() virtio_legacy_is_little_endian()
234 
235 #define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))
236 
237 #define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
238 #define GET_PBLOCK_DESC(x, bid) \
239         ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
240 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
241         ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
242 #define GET_NEXT_PRB_BLK_NUM(x) \
243         (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
244         ((x)->kactive_blk_num+1) : 0)
245 
246 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
247 static void __fanout_link(struct sock *sk, struct packet_sock *po);
248 
249 static int packet_direct_xmit(struct sk_buff *skb)
250 {
251         struct net_device *dev = skb->dev;
252         struct sk_buff *orig_skb = skb;
253         struct netdev_queue *txq;
254         int ret = NETDEV_TX_BUSY;
255 
256         if (unlikely(!netif_running(dev) ||
257                      !netif_carrier_ok(dev)))
258                 goto drop;
259 
260         skb = validate_xmit_skb_list(skb, dev);
261         if (skb != orig_skb)
262                 goto drop;
263 
264         packet_pick_tx_queue(dev, skb);
265         txq = skb_get_tx_queue(dev, skb);
266 
267         local_bh_disable();
268 
269         HARD_TX_LOCK(dev, txq, smp_processor_id());
270         if (!netif_xmit_frozen_or_drv_stopped(txq))
271                 ret = netdev_start_xmit(skb, dev, txq, false);
272         HARD_TX_UNLOCK(dev, txq);
273 
274         local_bh_enable();
275 
276         if (!dev_xmit_complete(ret))
277                 kfree_skb(skb);
278 
279         return ret;
280 drop:
281         atomic_long_inc(&dev->tx_dropped);
282         kfree_skb_list(skb);
283         return NET_XMIT_DROP;
284 }
285 
286 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
287 {
288         struct net_device *dev;
289 
290         rcu_read_lock();
291         dev = rcu_dereference(po->cached_dev);
292         if (likely(dev))
293                 dev_hold(dev);
294         rcu_read_unlock();
295 
296         return dev;
297 }
298 
299 static void packet_cached_dev_assign(struct packet_sock *po,
300                                      struct net_device *dev)
301 {
302         rcu_assign_pointer(po->cached_dev, dev);
303 }
304 
305 static void packet_cached_dev_reset(struct packet_sock *po)
306 {
307         RCU_INIT_POINTER(po->cached_dev, NULL);
308 }
309 
310 static bool packet_use_direct_xmit(const struct packet_sock *po)
311 {
312         return po->xmit == packet_direct_xmit;
313 }
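
/*
 * User-space view (illustrative): po->xmit points at packet_direct_xmit()
 * once the socket opts out of the qdisc layer, which is done with the
 * PACKET_QDISC_BYPASS socket option.  Note that bypassed frames skip
 * traffic shaping and are simply dropped on a busy queue:
 *
 *	#include <linux/if_packet.h>
 *	#include <sys/socket.h>
 *
 *	static int enable_qdisc_bypass(int fd)	// 'fd': AF_PACKET socket
 *	{
 *		int one = 1;
 *
 *		return setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
 *				  &one, sizeof(one));
 *	}
 */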
314 
315 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
316 {
317         return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
318 }
319 
320 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
321 {
322         const struct net_device_ops *ops = dev->netdev_ops;
323         u16 queue_index;
324 
325         if (ops->ndo_select_queue) {
326                 queue_index = ops->ndo_select_queue(dev, skb, NULL,
327                                                     __packet_pick_tx_queue);
328                 queue_index = netdev_cap_txqueue(dev, queue_index);
329         } else {
330                 queue_index = __packet_pick_tx_queue(dev, skb);
331         }
332 
333         skb_set_queue_mapping(skb, queue_index);
334 }
335 
336 /* register_prot_hook must be invoked with the po->bind_lock held,
337  * or from a context in which asynchronous accesses to the packet
338  * socket are not possible (packet_create()).
339  */
340 static void register_prot_hook(struct sock *sk)
341 {
342         struct packet_sock *po = pkt_sk(sk);
343 
344         if (!po->running) {
345                 if (po->fanout)
346                         __fanout_link(sk, po);
347                 else
348                         dev_add_pack(&po->prot_hook);
349 
350                 sock_hold(sk);
351                 po->running = 1;
352         }
353 }
354 
355 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
356  * held.   If the sync parameter is true, we will temporarily drop
357  * the po->bind_lock and do a synchronize_net to make sure no
358  * asynchronous packet processing paths still refer to the elements
359  * of po->prot_hook.  If the sync parameter is false, it is the
360  * caller's responsibility to take care of this.
361  */
362 static void __unregister_prot_hook(struct sock *sk, bool sync)
363 {
364         struct packet_sock *po = pkt_sk(sk);
365 
366         po->running = 0;
367 
368         if (po->fanout)
369                 __fanout_unlink(sk, po);
370         else
371                 __dev_remove_pack(&po->prot_hook);
372 
373         __sock_put(sk);
374 
375         if (sync) {
376                 spin_unlock(&po->bind_lock);
377                 synchronize_net();
378                 spin_lock(&po->bind_lock);
379         }
380 }
381 
382 static void unregister_prot_hook(struct sock *sk, bool sync)
383 {
384         struct packet_sock *po = pkt_sk(sk);
385 
386         if (po->running)
387                 __unregister_prot_hook(sk, sync);
388 }
389 
390 static inline struct page * __pure pgv_to_page(void *addr)
391 {
392         if (is_vmalloc_addr(addr))
393                 return vmalloc_to_page(addr);
394         return virt_to_page(addr);
395 }
396 
397 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
398 {
399         union tpacket_uhdr h;
400 
401         h.raw = frame;
402         switch (po->tp_version) {
403         case TPACKET_V1:
404                 h.h1->tp_status = status;
405                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
406                 break;
407         case TPACKET_V2:
408                 h.h2->tp_status = status;
409                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
410                 break;
411         case TPACKET_V3:
412                 h.h3->tp_status = status;
413                 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
414                 break;
415         default:
416                 WARN(1, "TPACKET version not supported.\n");
417                 BUG();
418         }
419 
420         smp_wmb();
421 }
422 
423 static int __packet_get_status(struct packet_sock *po, void *frame)
424 {
425         union tpacket_uhdr h;
426 
427         smp_rmb();
428 
429         h.raw = frame;
430         switch (po->tp_version) {
431         case TPACKET_V1:
432                 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
433                 return h.h1->tp_status;
434         case TPACKET_V2:
435                 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
436                 return h.h2->tp_status;
437         case TPACKET_V3:
438                 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
439                 return h.h3->tp_status;
440         default:
441                 WARN(1, "TPACKET version not supported.\n");
442                 BUG();
443                 return 0;
444         }
445 }
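
/*
 * Protocol sketch (illustrative): tp_status is the per-frame handoff word
 * shared through the mmap()ed ring.  The kernel sets TP_STATUS_USER
 * (ordered by the smp_wmb() above); user space consumes the frame and
 * writes TP_STATUS_KERNEL back.  A hypothetical TPACKET_V2 receive loop,
 * assuming the ring was set up with PACKET_RX_RING and mmap():
 *
 *	#include <linux/if_packet.h>
 *	#include <poll.h>
 *
 *	static void rx_loop(int fd, char *ring, unsigned int nr_frames,
 *			    unsigned int frame_sz)
 *	{
 *		unsigned int i = 0;
 *
 *		for (;;) {
 *			struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
 *						   (ring + i * frame_sz);
 *
 *			if (!(hdr->tp_status & TP_STATUS_USER)) {
 *				struct pollfd pfd = { fd, POLLIN, 0 };
 *
 *				poll(&pfd, 1, -1);	// wait for the kernel
 *				continue;
 *			}
 *			// frame data starts at (char *)hdr + hdr->tp_mac
 *			__sync_synchronize();
 *			hdr->tp_status = TP_STATUS_KERNEL;	// hand back
 *			i = (i + 1) % nr_frames;
 *		}
 *	}
 */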
446 
447 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
448                                    unsigned int flags)
449 {
450         struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
451 
452         if (shhwtstamps &&
453             (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
454             ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
455                 return TP_STATUS_TS_RAW_HARDWARE;
456 
457         if (ktime_to_timespec_cond(skb->tstamp, ts))
458                 return TP_STATUS_TS_SOFTWARE;
459 
460         return 0;
461 }
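
/*
 * Configuration sketch (illustrative): the flags tested here come from
 * the PACKET_TIMESTAMP socket option.  Requesting raw hardware stamps
 * makes tpacket_get_timestamp() prefer the NIC clock and flag the frame
 * with TP_STATUS_TS_RAW_HARDWARE; the device itself must also have
 * hardware timestamping enabled (e.g. via SIOCSHWTSTAMP):
 *
 *	#include <linux/if_packet.h>
 *	#include <linux/net_tstamp.h>
 *	#include <sys/socket.h>
 *
 *	static int enable_hw_tstamps(int fd)	// 'fd': AF_PACKET socket
 *	{
 *		int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *
 *		return setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
 *				  &req, sizeof(req));
 *	}
 */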
462 
463 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
464                                     struct sk_buff *skb)
465 {
466         union tpacket_uhdr h;
467         struct timespec ts;
468         __u32 ts_status;
469 
470         if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
471                 return 0;
472 
473         h.raw = frame;
474         switch (po->tp_version) {
475         case TPACKET_V1:
476                 h.h1->tp_sec = ts.tv_sec;
477                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
478                 break;
479         case TPACKET_V2:
480                 h.h2->tp_sec = ts.tv_sec;
481                 h.h2->tp_nsec = ts.tv_nsec;
482                 break;
483         case TPACKET_V3:
484                 h.h3->tp_sec = ts.tv_sec;
485                 h.h3->tp_nsec = ts.tv_nsec;
486                 break;
487         default:
488                 WARN(1, "TPACKET version not supported.\n");
489                 BUG();
490         }
491 
492         /* one flush is safe, as both fields always lie on the same cacheline */
493         flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
494         smp_wmb();
495 
496         return ts_status;
497 }
498 
499 static void *packet_lookup_frame(struct packet_sock *po,
500                 struct packet_ring_buffer *rb,
501                 unsigned int position,
502                 int status)
503 {
504         unsigned int pg_vec_pos, frame_offset;
505         union tpacket_uhdr h;
506 
507         pg_vec_pos = position / rb->frames_per_block;
508         frame_offset = position % rb->frames_per_block;
509 
510         h.raw = rb->pg_vec[pg_vec_pos].buffer +
511                 (frame_offset * rb->frame_size);
512 
513         if (status != __packet_get_status(po, h.raw))
514                 return NULL;
515 
516         return h.raw;
517 }
518 
519 static void *packet_current_frame(struct packet_sock *po,
520                 struct packet_ring_buffer *rb,
521                 int status)
522 {
523         return packet_lookup_frame(po, rb, rb->head, status);
524 }
525 
526 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
527 {
528         del_timer_sync(&pkc->retire_blk_timer);
529 }
530 
531 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
532                 struct sk_buff_head *rb_queue)
533 {
534         struct tpacket_kbdq_core *pkc;
535 
536         pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
537 
538         spin_lock_bh(&rb_queue->lock);
539         pkc->delete_blk_timer = 1;
540         spin_unlock_bh(&rb_queue->lock);
541 
542         prb_del_retire_blk_timer(pkc);
543 }
544 
545 static void prb_init_blk_timer(struct packet_sock *po,
546                 struct tpacket_kbdq_core *pkc,
547                 void (*func) (unsigned long))
548 {
549         init_timer(&pkc->retire_blk_timer);
550         pkc->retire_blk_timer.data = (long)po;
551         pkc->retire_blk_timer.function = func;
552         pkc->retire_blk_timer.expires = jiffies;
553 }
554 
555 static void prb_setup_retire_blk_timer(struct packet_sock *po)
556 {
557         struct tpacket_kbdq_core *pkc;
558 
559         pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
560         prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
561 }
562 
563 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
564                                 int blk_size_in_bytes)
565 {
566         struct net_device *dev;
567         unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
568         struct ethtool_link_ksettings ecmd;
569         int err;
570 
571         rtnl_lock();
572         dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
573         if (unlikely(!dev)) {
574                 rtnl_unlock();
575                 return DEFAULT_PRB_RETIRE_TOV;
576         }
577         err = __ethtool_get_link_ksettings(dev, &ecmd);
578         rtnl_unlock();
579         if (!err) {
580                 /*
581                  * If the link speed is so slow that you don't really
582                  * need to worry about perf anyway
583                  */
584                 if (ecmd.base.speed < SPEED_1000 ||
585                     ecmd.base.speed == SPEED_UNKNOWN) {
586                         return DEFAULT_PRB_RETIRE_TOV;
587                 } else {
588                         msec = 1;
589                         div = ecmd.base.speed / 1000;
590                 }
591         }
592 
593         mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
594 
595         if (div)
596                 mbits /= div;
597 
598         tmo = mbits * msec;
599 
600         if (div)
601                 return tmo+1;
602         return tmo;
603 }
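
/*
 * Worked example of the above (illustrative): for a 1 MiB block on a
 * 1 Gbps link, mbits = (1048576 * 8) / (1024 * 1024) = 8, msec = 1 and
 * div = 1000 / 1000 = 1, so tmo = 8 and the function returns 9 msecs --
 * just above the ~8 msecs it takes to fill the block (see the timer
 * comment below).  On a 10 Gbps link div = 10, the integer division
 * floors mbits to 0 and the timeout bottoms out at 1 msec.
 */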
604 
605 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
606                         union tpacket_req_u *req_u)
607 {
608         p1->feature_req_word = req_u->req3.tp_feature_req_word;
609 }
610 
611 static void init_prb_bdqc(struct packet_sock *po,
612                         struct packet_ring_buffer *rb,
613                         struct pgv *pg_vec,
614                         union tpacket_req_u *req_u)
615 {
616         struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
617         struct tpacket_block_desc *pbd;
618 
619         memset(p1, 0x0, sizeof(*p1));
620 
621         p1->knxt_seq_num = 1;
622         p1->pkbdq = pg_vec;
623         pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
624         p1->pkblk_start = pg_vec[0].buffer;
625         p1->kblk_size = req_u->req3.tp_block_size;
626         p1->knum_blocks = req_u->req3.tp_block_nr;
627         p1->hdrlen = po->tp_hdrlen;
628         p1->version = po->tp_version;
629         p1->last_kactive_blk_num = 0;
630         po->stats.stats3.tp_freeze_q_cnt = 0;
631         if (req_u->req3.tp_retire_blk_tov)
632                 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
633         else
634                 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
635                                                 req_u->req3.tp_block_size);
636         p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
637         p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
638 
639         p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
640         prb_init_ft_ops(p1, req_u);
641         prb_setup_retire_blk_timer(po);
642         prb_open_block(p1, pbd);
643 }
644 
645 /*  Do NOT update the last_blk_num first.
646  *  Assumes sk_buff_head lock is held.
647  */
648 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
649 {
650         mod_timer(&pkc->retire_blk_timer,
651                         jiffies + pkc->tov_in_jiffies);
652         pkc->last_kactive_blk_num = pkc->kactive_blk_num;
653 }
654 
655 /*
656  * Timer logic:
657  * 1) We refresh the timer only when we open a block.
658  *    By doing this we don't waste cycles refreshing the timer
659  *    on a packet-by-packet basis.
660  *
661  * With a 1MB block-size, on a 1Gbps line, it will take
662  * i) ~8 ms to fill a block + ii) memcpy etc.
663  * In this cut we are not accounting for the memcpy time.
664  *
665  * So, if the user sets the 'tmo' to 10ms then the timer
666  * will never fire while the block is still getting filled
667  * (which is what we want). However, the user could choose
668  * to close a block early and that's fine.
669  *
670  * But when the timer does fire, we check whether or not to refresh it.
671  * Since the tmo granularity is in msecs, it is not too expensive
672  * to refresh the timer, let's say every 8 msecs.
673  * Either the user can set the 'tmo' or we can derive it based on
674  * a) line-speed and b) block-size.
675  * prb_calc_retire_blk_tmo() calculates the tmo.
676  *
677  */
678 static void prb_retire_rx_blk_timer_expired(unsigned long data)
679 {
680         struct packet_sock *po = (struct packet_sock *)data;
681         struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
682         unsigned int frozen;
683         struct tpacket_block_desc *pbd;
684 
685         spin_lock(&po->sk.sk_receive_queue.lock);
686 
687         frozen = prb_queue_frozen(pkc);
688         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
689 
690         if (unlikely(pkc->delete_blk_timer))
691                 goto out;
692 
693         /* We only need to plug the race when the block is partially filled.
694          * tpacket_rcv:
695          *              lock(); increment BLOCK_NUM_PKTS; unlock()
696          *              copy_bits() is in progress ...
697          *              timer fires on other cpu:
698          *              we can't retire the current block because copy_bits
699          *              is in progress.
700          *
701          */
702         if (BLOCK_NUM_PKTS(pbd)) {
703                 while (atomic_read(&pkc->blk_fill_in_prog)) {
704                         /* Waiting for skb_copy_bits to finish... */
705                         cpu_relax();
706                 }
707         }
708 
709         if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
710                 if (!frozen) {
711                         if (!BLOCK_NUM_PKTS(pbd)) {
712                                 /* An empty block. Just refresh the timer. */
713                                 goto refresh_timer;
714                         }
715                         prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
716                         if (!prb_dispatch_next_block(pkc, po))
717                                 goto refresh_timer;
718                         else
719                                 goto out;
720                 } else {
721                         /* Case 1. Queue was frozen because user-space was
722                          *         lagging behind.
723                          */
724                         if (prb_curr_blk_in_use(pbd)) {
725                                 /*
726                                  * Ok, user-space is still behind.
727                                  * So just refresh the timer.
728                                  */
729                                 goto refresh_timer;
730                         } else {
731                /* Case 2. Queue was frozen, user-space caught up,
732                 * now the link went idle && the timer fired.
733                 * We don't have a block to close. So we open this
734                 * block and restart the timer.
735                 * Opening a block thaws the queue and restarts the timer.
736                 * Thawing/timer-refresh is a side effect.
737                 */
738                                 prb_open_block(pkc, pbd);
739                                 goto out;
740                         }
741                 }
742         }
743 
744 refresh_timer:
745         _prb_refresh_rx_retire_blk_timer(pkc);
746 
747 out:
748         spin_unlock(&po->sk.sk_receive_queue.lock);
749 }
750 
751 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
752                 struct tpacket_block_desc *pbd1, __u32 status)
753 {
754         /* Flush everything minus the block header */
755 
756 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
757         u8 *start, *end;
758 
759         start = (u8 *)pbd1;
760 
761         /* Skip the block header (we know the header WILL fit in 4K) */
762         start += PAGE_SIZE;
763 
764         end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
765         for (; start < end; start += PAGE_SIZE)
766                 flush_dcache_page(pgv_to_page(start));
767 
768         smp_wmb();
769 #endif
770 
771         /* Now update the block status. */
772 
773         BLOCK_STATUS(pbd1) = status;
774 
775         /* Flush the block header */
776 
777 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
778         start = (u8 *)pbd1;
779         flush_dcache_page(pgv_to_page(start));
780 
781         smp_wmb();
782 #endif
783 }
784 
785 /*
786  * Side effect:
787  *
788  * 1) flush the block
789  * 2) Increment active_blk_num
790  *
791  * Note: We DON'T refresh the timer on purpose, because
792  *       almost always the next block will be opened.
793  */
794 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
795                 struct tpacket_block_desc *pbd1,
796                 struct packet_sock *po, unsigned int stat)
797 {
798         __u32 status = TP_STATUS_USER | stat;
799 
800         struct tpacket3_hdr *last_pkt;
801         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
802         struct sock *sk = &po->sk;
803 
804         if (po->stats.stats3.tp_drops)
805                 status |= TP_STATUS_LOSING;
806 
807         last_pkt = (struct tpacket3_hdr *)pkc1->prev;
808         last_pkt->tp_next_offset = 0;
809 
810         /* Get the ts of the last pkt */
811         if (BLOCK_NUM_PKTS(pbd1)) {
812                 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
813                 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
814         } else {
815                 /* Ok, we tmo'd - so get the current time.
816                  *
817                  * It shouldn't really happen as we don't close empty
818                  * blocks. See prb_retire_rx_blk_timer_expired().
819                  */
820                 struct timespec ts;
821                 getnstimeofday(&ts);
822                 h1->ts_last_pkt.ts_sec = ts.tv_sec;
823                 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
824         }
825 
826         smp_wmb();
827 
828         /* Flush the block */
829         prb_flush_block(pkc1, pbd1, status);
830 
831         sk->sk_data_ready(sk);
832 
833         pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
834 }
835 
836 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
837 {
838         pkc->reset_pending_on_curr_blk = 0;
839 }
840 
841 /*
842  * Side effect of opening a block:
843  *
844  * 1) prb_queue is thawed.
845  * 2) retire_blk_timer is refreshed.
846  *
847  */
848 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
849         struct tpacket_block_desc *pbd1)
850 {
851         struct timespec ts;
852         struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
853 
854         smp_rmb();
855 
856         /* We could have just memset this, but then we would lose
857          * the flexibility of keeping the priv area sticky
858          */
859 
860         BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
861         BLOCK_NUM_PKTS(pbd1) = 0;
862         BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
863 
864         getnstimeofday(&ts);
865 
866         h1->ts_first_pkt.ts_sec = ts.tv_sec;
867         h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
868 
869         pkc1->pkblk_start = (char *)pbd1;
870         pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
871 
872         BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
873         BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
874 
875         pbd1->version = pkc1->version;
876         pkc1->prev = pkc1->nxt_offset;
877         pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
878 
879         prb_thaw_queue(pkc1);
880         _prb_refresh_rx_retire_blk_timer(pkc1);
881 
882         smp_wmb();
883 }
884 
885 /*
886  * Queue freeze logic:
887  * 1) Assume tp_block_nr = 8 blocks.
888  * 2) At time 't0', user opens Rx ring.
889  * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
890  * 4) user-space is either sleeping or processing block '0'.
891  * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
892  *    it will close block-7, loop around and try to fill block '0'.
893  *    call-flow:
894  *    __packet_lookup_frame_in_block
895  *      prb_retire_current_block()
896  *      prb_dispatch_next_block()
897  *        |->(BLOCK_STATUS == USER) evaluates to true
898  *    5.1) Since block-0 is currently in-use, we just freeze the queue.
899  * 6) Now there are two cases:
900  *    6.1) Link goes idle right after the queue is frozen.
901  *         But remember, the last open_block() refreshed the timer.
902  *         When this timer expires, it will refresh itself so that we can
903  *         re-open block-0 in the near future.
904  *    6.2) Link is busy and keeps on receiving packets. This is a simple
905  *         case and __packet_lookup_frame_in_block will check if block-0
906  *         is free and can now be re-used.
907  */
908 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
909                                   struct packet_sock *po)
910 {
911         pkc->reset_pending_on_curr_blk = 1;
912         po->stats.stats3.tp_freeze_q_cnt++;
913 }
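
/*
 * Consumer sketch (illustrative): the freeze lifts only after user space
 * writes the block status back, which __packet_lookup_frame_in_block()
 * notices via prb_curr_blk_in_use().  A hypothetical TPACKET_V3 reader,
 * assuming an mmap()ed PACKET_RX_RING of fixed-size blocks:
 *
 *	#include <linux/if_packet.h>
 *
 *	static void consume_block(char *ring, unsigned int idx,
 *				  unsigned int block_sz)
 *	{
 *		struct tpacket_block_desc *pbd = (struct tpacket_block_desc *)
 *						 (ring + idx * block_sz);
 *		struct tpacket3_hdr *ppd;
 *		unsigned int i;
 *
 *		ppd = (struct tpacket3_hdr *)((char *)pbd +
 *					      pbd->hdr.bh1.offset_to_first_pkt);
 *		for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
 *			// packet data starts at (char *)ppd + ppd->tp_mac
 *			ppd = (struct tpacket3_hdr *)((char *)ppd +
 *						      ppd->tp_next_offset);
 *		}
 *		__sync_synchronize();
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// thaw
 *	}
 */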
914 
915 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
916 
917 /*
918  * If the next block is free then we will dispatch it
919  * and return a good offset.
920  * Else, we will freeze the queue.
921  * So, caller must check the return value.
922  */
923 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
924                 struct packet_sock *po)
925 {
926         struct tpacket_block_desc *pbd;
927 
928         smp_rmb();
929 
930         /* 1. Get current block num */
931         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
932 
933         /* 2. If this block is currently in_use then freeze the queue */
934         if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
935                 prb_freeze_queue(pkc, po);
936                 return NULL;
937         }
938 
939         /*
940          * 3.
941          * open this block and return the offset where the first packet
942          * needs to get stored.
943          */
944         prb_open_block(pkc, pbd);
945         return (void *)pkc->nxt_offset;
946 }
947 
948 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
949                 struct packet_sock *po, unsigned int status)
950 {
951         struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
952 
953         /* retire/close the current block */
954         if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
955                 /*
956                  * Plug the case where copy_bits() is in progress on
957                  * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
958                  * have space to copy the pkt in the current block and
959                  * called prb_retire_current_block()
960                  *
961                  * We don't need to worry about the TMO case because
962                  * the timer-handler already handled this case.
963                  */
964                 if (!(status & TP_STATUS_BLK_TMO)) {
965                         while (atomic_read(&pkc->blk_fill_in_prog)) {
966                                 /* Waiting for skb_copy_bits to finish... */
967                                 cpu_relax();
968                         }
969                 }
970                 prb_close_block(pkc, pbd, po, status);
971                 return;
972         }
973 }
974 
975 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
976 {
977         return TP_STATUS_USER & BLOCK_STATUS(pbd);
978 }
979 
980 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
981 {
982         return pkc->reset_pending_on_curr_blk;
983 }
984 
985 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
986 {
987         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
988         atomic_dec(&pkc->blk_fill_in_prog);
989 }
990 
991 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
992                         struct tpacket3_hdr *ppd)
993 {
994         ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
995 }
996 
997 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
998                         struct tpacket3_hdr *ppd)
999 {
1000         ppd->hv1.tp_rxhash = 0;
1001 }
1002 
1003 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1004                         struct tpacket3_hdr *ppd)
1005 {
1006         if (skb_vlan_tag_present(pkc->skb)) {
1007                 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1008                 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1009                 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1010         } else {
1011                 ppd->hv1.tp_vlan_tci = 0;
1012                 ppd->hv1.tp_vlan_tpid = 0;
1013                 ppd->tp_status = TP_STATUS_AVAILABLE;
1014         }
1015 }
1016 
1017 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1018                         struct tpacket3_hdr *ppd)
1019 {
1020         ppd->hv1.tp_padding = 0;
1021         prb_fill_vlan_info(pkc, ppd);
1022 
1023         if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1024                 prb_fill_rxhash(pkc, ppd);
1025         else
1026                 prb_clear_rxhash(pkc, ppd);
1027 }
1028 
1029 static void prb_fill_curr_block(char *curr,
1030                                 struct tpacket_kbdq_core *pkc,
1031                                 struct tpacket_block_desc *pbd,
1032                                 unsigned int len)
1033 {
1034         struct tpacket3_hdr *ppd;
1035 
1036         ppd  = (struct tpacket3_hdr *)curr;
1037         ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1038         pkc->prev = curr;
1039         pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1040         BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1041         BLOCK_NUM_PKTS(pbd) += 1;
1042         atomic_inc(&pkc->blk_fill_in_prog);
1043         prb_run_all_ft_ops(pkc, ppd);
1044 }
1045 
1046 /* Assumes caller has the sk->rx_queue.lock */
1047 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1048                                             struct sk_buff *skb,
1049                                                 int status,
1050                                             unsigned int len
1051                                             )
1052 {
1053         struct tpacket_kbdq_core *pkc;
1054         struct tpacket_block_desc *pbd;
1055         char *curr, *end;
1056 
1057         pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1058         pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1059 
1060         /* Queue is frozen when user space is lagging behind */
1061         if (prb_queue_frozen(pkc)) {
1062                 /*
1063                  * Check if the last block, which caused the queue to freeze,
1064                  * is still in use by user-space.
1065                  */
1066                 if (prb_curr_blk_in_use(pbd)) {
1067                         /* Can't record this packet */
1068                         return NULL;
1069                 } else {
1070                         /*
1071                          * Ok, the block was released by user-space.
1072                          * Now let's open that block.
1073                          * Opening a block also thaws the queue.
1074                          * Thawing is a side effect.
1075                          */
1076                         prb_open_block(pkc, pbd);
1077                 }
1078         }
1079 
1080         smp_mb();
1081         curr = pkc->nxt_offset;
1082         pkc->skb = skb;
1083         end = (char *)pbd + pkc->kblk_size;
1084 
1085         /* first try the current block */
1086         if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1087                 prb_fill_curr_block(curr, pkc, pbd, len);
1088                 return (void *)curr;
1089         }
1090 
1091         /* Ok, close the current block */
1092         prb_retire_current_block(pkc, po, 0);
1093 
1094         /* Now, try to dispatch the next block */
1095         curr = (char *)prb_dispatch_next_block(pkc, po);
1096         if (curr) {
1097                 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1098                 prb_fill_curr_block(curr, pkc, pbd, len);
1099                 return (void *)curr;
1100         }
1101 
1102         /*
1103          * No free blocks are available. user-space hasn't caught up yet.
1104          * The queue was just frozen and now this packet will get dropped.
1105          */
1106         return NULL;
1107 }
1108 
1109 static void *packet_current_rx_frame(struct packet_sock *po,
1110                                             struct sk_buff *skb,
1111                                             int status, unsigned int len)
1112 {
1113         char *curr = NULL;
1114         switch (po->tp_version) {
1115         case TPACKET_V1:
1116         case TPACKET_V2:
1117                 curr = packet_lookup_frame(po, &po->rx_ring,
1118                                         po->rx_ring.head, status);
1119                 return curr;
1120         case TPACKET_V3:
1121                 return __packet_lookup_frame_in_block(po, skb, status, len);
1122         default:
1123                 WARN(1, "TPACKET version not supported\n");
1124                 BUG();
1125                 return NULL;
1126         }
1127 }
1128 
1129 static void *prb_lookup_block(struct packet_sock *po,
1130                                      struct packet_ring_buffer *rb,
1131                                      unsigned int idx,
1132                                      int status)
1133 {
1134         struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1135         struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1136 
1137         if (status != BLOCK_STATUS(pbd))
1138                 return NULL;
1139         return pbd;
1140 }
1141 
1142 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1143 {
1144         unsigned int prev;
1145         if (rb->prb_bdqc.kactive_blk_num)
1146                 prev = rb->prb_bdqc.kactive_blk_num-1;
1147         else
1148                 prev = rb->prb_bdqc.knum_blocks-1;
1149         return prev;
1150 }
1151 
1152 /* Assumes caller has held the rx_queue.lock */
1153 static void *__prb_previous_block(struct packet_sock *po,
1154                                          struct packet_ring_buffer *rb,
1155                                          int status)
1156 {
1157         unsigned int previous = prb_previous_blk_num(rb);
1158         return prb_lookup_block(po, rb, previous, status);
1159 }
1160 
1161 static void *packet_previous_rx_frame(struct packet_sock *po,
1162                                              struct packet_ring_buffer *rb,
1163                                              int status)
1164 {
1165         if (po->tp_version <= TPACKET_V2)
1166                 return packet_previous_frame(po, rb, status);
1167 
1168         return __prb_previous_block(po, rb, status);
1169 }
1170 
1171 static void packet_increment_rx_head(struct packet_sock *po,
1172                                             struct packet_ring_buffer *rb)
1173 {
1174         switch (po->tp_version) {
1175         case TPACKET_V1:
1176         case TPACKET_V2:
1177                 return packet_increment_head(rb);
1178         case TPACKET_V3:
1179         default:
1180                 WARN(1, "TPACKET version not supported.\n");
1181                 BUG();
1182                 return;
1183         }
1184 }
1185 
1186 static void *packet_previous_frame(struct packet_sock *po,
1187                 struct packet_ring_buffer *rb,
1188                 int status)
1189 {
1190         unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1191         return packet_lookup_frame(po, rb, previous, status);
1192 }
1193 
1194 static void packet_increment_head(struct packet_ring_buffer *buff)
1195 {
1196         buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1197 }
1198 
1199 static void packet_inc_pending(struct packet_ring_buffer *rb)
1200 {
1201         this_cpu_inc(*rb->pending_refcnt);
1202 }
1203 
1204 static void packet_dec_pending(struct packet_ring_buffer *rb)
1205 {
1206         this_cpu_dec(*rb->pending_refcnt);
1207 }
1208 
1209 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1210 {
1211         unsigned int refcnt = 0;
1212         int cpu;
1213 
1214         /* We don't use pending refcount in rx_ring. */
1215         if (rb->pending_refcnt == NULL)
1216                 return 0;
1217 
1218         for_each_possible_cpu(cpu)
1219                 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1220 
1221         return refcnt;
1222 }
1223 
1224 static int packet_alloc_pending(struct packet_sock *po)
1225 {
1226         po->rx_ring.pending_refcnt = NULL;
1227 
1228         po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1229         if (unlikely(po->tx_ring.pending_refcnt == NULL))
1230                 return -ENOBUFS;
1231 
1232         return 0;
1233 }
1234 
1235 static void packet_free_pending(struct packet_sock *po)
1236 {
1237         free_percpu(po->tx_ring.pending_refcnt);
1238 }
1239 
1240 #define ROOM_POW_OFF    2
1241 #define ROOM_NONE       0x0
1242 #define ROOM_LOW        0x1
1243 #define ROOM_NORMAL     0x2
1244 
1245 static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
1246 {
1247         int idx, len;
1248 
1249         len = po->rx_ring.frame_max + 1;
1250         idx = po->rx_ring.head;
1251         if (pow_off)
1252                 idx += len >> pow_off;
1253         if (idx >= len)
1254                 idx -= len;
1255         return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1256 }
1257 
1258 static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1259 {
1260         int idx, len;
1261 
1262         len = po->rx_ring.prb_bdqc.knum_blocks;
1263         idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1264         if (pow_off)
1265                 idx += len >> pow_off;
1266         if (idx >= len)
1267                 idx -= len;
1268         return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1269 }
1270 
1271 static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1272 {
1273         struct sock *sk = &po->sk;
1274         int ret = ROOM_NONE;
1275 
1276         if (po->prot_hook.func != tpacket_rcv) {
1277                 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1278                                           - (skb ? skb->truesize : 0);
1279                 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1280                         return ROOM_NORMAL;
1281                 else if (avail > 0)
1282                         return ROOM_LOW;
1283                 else
1284                         return ROOM_NONE;
1285         }
1286 
1287         if (po->tp_version == TPACKET_V3) {
1288                 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1289                         ret = ROOM_NORMAL;
1290                 else if (__tpacket_v3_has_room(po, 0))
1291                         ret = ROOM_LOW;
1292         } else {
1293                 if (__tpacket_has_room(po, ROOM_POW_OFF))
1294                         ret = ROOM_NORMAL;
1295                 else if (__tpacket_has_room(po, 0))
1296                         ret = ROOM_LOW;
1297         }
1298 
1299         return ret;
1300 }
1301 
1302 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1303 {
1304         int ret;
1305         bool has_room;
1306 
1307         spin_lock_bh(&po->sk.sk_receive_queue.lock);
1308         ret = __packet_rcv_has_room(po, skb);
1309         has_room = ret == ROOM_NORMAL;
1310         if (po->pressure == has_room)
1311                 po->pressure = !has_room;
1312         spin_unlock_bh(&po->sk.sk_receive_queue.lock);
1313 
1314         return ret;
1315 }
1316 
1317 static void packet_sock_destruct(struct sock *sk)
1318 {
1319         skb_queue_purge(&sk->sk_error_queue);
1320 
1321         WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1322         WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1323 
1324         if (!sock_flag(sk, SOCK_DEAD)) {
1325                 pr_err("Attempt to release alive packet socket: %p\n", sk);
1326                 return;
1327         }
1328 
1329         sk_refcnt_debug_dec(sk);
1330 }
1331 
1332 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1333 {
1334         u32 rxhash;
1335         int i, count = 0;
1336 
1337         rxhash = skb_get_hash(skb);
1338         for (i = 0; i < ROLLOVER_HLEN; i++)
1339                 if (po->rollover->history[i] == rxhash)
1340                         count++;
1341 
1342         po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1343         return count > (ROLLOVER_HLEN >> 1);
1344 }
1345 
1346 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1347                                       struct sk_buff *skb,
1348                                       unsigned int num)
1349 {
1350         return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1351 }
1352 
1353 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1354                                     struct sk_buff *skb,
1355                                     unsigned int num)
1356 {
1357         unsigned int val = atomic_inc_return(&f->rr_cur);
1358 
1359         return val % num;
1360 }
1361 
1362 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1363                                      struct sk_buff *skb,
1364                                      unsigned int num)
1365 {
1366         return smp_processor_id() % num;
1367 }
1368 
1369 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1370                                      struct sk_buff *skb,
1371                                      unsigned int num)
1372 {
1373         return prandom_u32_max(num);
1374 }
1375 
1376 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1377                                           struct sk_buff *skb,
1378                                           unsigned int idx, bool try_self,
1379                                           unsigned int num)
1380 {
1381         struct packet_sock *po, *po_next, *po_skip = NULL;
1382         unsigned int i, j, room = ROOM_NONE;
1383 
1384         po = pkt_sk(f->arr[idx]);
1385 
1386         if (try_self) {
1387                 room = packet_rcv_has_room(po, skb);
1388                 if (room == ROOM_NORMAL ||
1389                     (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1390                         return idx;
1391                 po_skip = po;
1392         }
1393 
1394         i = j = min_t(int, po->rollover->sock, num - 1);
1395         do {
1396                 po_next = pkt_sk(f->arr[i]);
1397                 if (po_next != po_skip && !po_next->pressure &&
1398                     packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1399                         if (i != j)
1400                                 po->rollover->sock = i;
1401                         atomic_long_inc(&po->rollover->num);
1402                         if (room == ROOM_LOW)
1403                                 atomic_long_inc(&po->rollover->num_huge);
1404                         return i;
1405                 }
1406 
1407                 if (++i == num)
1408                         i = 0;
1409         } while (i != j);
1410 
1411         atomic_long_inc(&po->rollover->num_failed);
1412         return idx;
1413 }
1414 
1415 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1416                                     struct sk_buff *skb,
1417                                     unsigned int num)
1418 {
1419         return skb_get_queue_mapping(skb) % num;
1420 }
1421 
1422 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1423                                      struct sk_buff *skb,
1424                                      unsigned int num)
1425 {
1426         struct bpf_prog *prog;
1427         unsigned int ret = 0;
1428 
1429         rcu_read_lock();
1430         prog = rcu_dereference(f->bpf_prog);
1431         if (prog)
1432                 ret = bpf_prog_run_clear_cb(prog, skb) % num;
1433         rcu_read_unlock();
1434 
1435         return ret;
1436 }
1437 
1438 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1439 {
1440         return f->flags & (flag >> 8);
1441 }
1442 
1443 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1444                              struct packet_type *pt, struct net_device *orig_dev)
1445 {
1446         struct packet_fanout *f = pt->af_packet_priv;
1447         unsigned int num = READ_ONCE(f->num_members);
1448         struct net *net = read_pnet(&f->net);
1449         struct packet_sock *po;
1450         unsigned int idx;
1451 
1452         if (!net_eq(dev_net(dev), net) || !num) {
1453                 kfree_skb(skb);
1454                 return 0;
1455         }
1456 
1457         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1458                 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1459                 if (!skb)
1460                         return 0;
1461         }
1462         switch (f->type) {
1463         case PACKET_FANOUT_HASH:
1464         default:
1465                 idx = fanout_demux_hash(f, skb, num);
1466                 break;
1467         case PACKET_FANOUT_LB:
1468                 idx = fanout_demux_lb(f, skb, num);
1469                 break;
1470         case PACKET_FANOUT_CPU:
1471                 idx = fanout_demux_cpu(f, skb, num);
1472                 break;
1473         case PACKET_FANOUT_RND:
1474                 idx = fanout_demux_rnd(f, skb, num);
1475                 break;
1476         case PACKET_FANOUT_QM:
1477                 idx = fanout_demux_qm(f, skb, num);
1478                 break;
1479         case PACKET_FANOUT_ROLLOVER:
1480                 idx = fanout_demux_rollover(f, skb, 0, false, num);
1481                 break;
1482         case PACKET_FANOUT_CBPF:
1483         case PACKET_FANOUT_EBPF:
1484                 idx = fanout_demux_bpf(f, skb, num);
1485                 break;
1486         }
1487 
1488         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1489                 idx = fanout_demux_rollover(f, skb, idx, true, num);
1490 
1491         po = pkt_sk(f->arr[idx]);
1492         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1493 }
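
/*
 * Group setup sketch (illustrative): sockets join a fanout group with
 * the PACKET_FANOUT option; the group id lives in the low 16 bits of
 * the argument and the mode/flags in the high 16 bits (matching the
 * 'flag >> 8' storage checked in fanout_has_flag() above):
 *
 *	#include <linux/if_packet.h>
 *	#include <sys/socket.h>
 *
 *	// 'fd': a bound AF_PACKET socket; hash mode plus rollover
 *	static int join_fanout(int fd, unsigned short id)
 *	{
 *		int arg = id | ((PACKET_FANOUT_HASH |
 *				 PACKET_FANOUT_FLAG_ROLLOVER) << 16);
 *
 *		return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *				  &arg, sizeof(arg));
 *	}
 */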
1494 
1495 DEFINE_MUTEX(fanout_mutex);
1496 EXPORT_SYMBOL_GPL(fanout_mutex);
1497 static LIST_HEAD(fanout_list);
1498 static u16 fanout_next_id;
1499 
1500 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1501 {
1502         struct packet_fanout *f = po->fanout;
1503 
1504         spin_lock(&f->lock);
1505         f->arr[f->num_members] = sk;
1506         smp_wmb();
1507         f->num_members++;
1508         if (f->num_members == 1)
1509                 dev_add_pack(&f->prot_hook);
1510         spin_unlock(&f->lock);
1511 }
1512 
1513 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1514 {
1515         struct packet_fanout *f = po->fanout;
1516         int i;
1517 
1518         spin_lock(&f->lock);
1519         for (i = 0; i < f->num_members; i++) {
1520                 if (f->arr[i] == sk)
1521                         break;
1522         }
1523         BUG_ON(i >= f->num_members);
1524         f->arr[i] = f->arr[f->num_members - 1];
1525         f->num_members--;
1526         if (f->num_members == 0)
1527                 __dev_remove_pack(&f->prot_hook);
1528         spin_unlock(&f->lock);
1529 }
1530 
1531 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1532 {
1533         if (sk->sk_family != PF_PACKET)
1534                 return false;
1535 
1536         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1537 }
1538 
1539 static void fanout_init_data(struct packet_fanout *f)
1540 {
1541         switch (f->type) {
1542         case PACKET_FANOUT_LB:
1543                 atomic_set(&f->rr_cur, 0);
1544                 break;
1545         case PACKET_FANOUT_CBPF:
1546         case PACKET_FANOUT_EBPF:
1547                 RCU_INIT_POINTER(f->bpf_prog, NULL);
1548                 break;
1549         }
1550 }
1551 
1552 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1553 {
1554         struct bpf_prog *old;
1555 
1556         spin_lock(&f->lock);
1557         old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1558         rcu_assign_pointer(f->bpf_prog, new);
1559         spin_unlock(&f->lock);
1560 
1561         if (old) {
1562                 synchronize_net();
1563                 bpf_prog_destroy(old);
1564         }
1565 }
1566 
1567 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1568                                 unsigned int len)
1569 {
1570         struct bpf_prog *new;
1571         struct sock_fprog fprog;
1572         int ret;
1573 
1574         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1575                 return -EPERM;
1576         if (len != sizeof(fprog))
1577                 return -EINVAL;
1578         if (copy_from_user(&fprog, data, len))
1579                 return -EFAULT;
1580 
1581         ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1582         if (ret)
1583                 return ret;
1584 
1585         __fanout_set_data_bpf(po->fanout, new);
1586         return 0;
1587 }
1588 
1589 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1590                                 unsigned int len)
1591 {
1592         struct bpf_prog *new;
1593         u32 fd;
1594 
1595         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1596                 return -EPERM;
1597         if (len != sizeof(fd))
1598                 return -EINVAL;
1599         if (copy_from_user(&fd, data, len))
1600                 return -EFAULT;
1601 
1602         new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1603         if (IS_ERR(new))
1604                 return PTR_ERR(new);
1605 
1606         __fanout_set_data_bpf(po->fanout, new);
1607         return 0;
1608 }
1609 
1610 static int fanout_set_data(struct packet_sock *po, char __user *data,
1611                            unsigned int len)
1612 {
1613         switch (po->fanout->type) {
1614         case PACKET_FANOUT_CBPF:
1615                 return fanout_set_data_cbpf(po, data, len);
1616         case PACKET_FANOUT_EBPF:
1617                 return fanout_set_data_ebpf(po, data, len);
1618         default:
1619                 return -EINVAL;
1620         }
1621 }
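
/*
 * Sketch (illustrative only) of the userspace side of
 * fanout_set_data_cbpf() above: a classic BPF program handed to a
 * PACKET_FANOUT_CBPF group via PACKET_FANOUT_DATA. The one-instruction
 * program below returns index 0 for every packet, steering all traffic
 * to the first member; a real program would compute a spread.
 */
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_packet.h>

static int fanout_cbpf_example(int fd_in_cbpf_group)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0),	/* member index 0, always */
	};
	struct sock_fprog fprog = {
		.len    = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* the program's return value is taken modulo the group size */
	return setsockopt(fd_in_cbpf_group, SOL_PACKET, PACKET_FANOUT_DATA,
			  &fprog, sizeof(fprog));
}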
1622 
1623 static void fanout_release_data(struct packet_fanout *f)
1624 {
1625         switch (f->type) {
1626         case PACKET_FANOUT_CBPF:
1627         case PACKET_FANOUT_EBPF:
1628                 __fanout_set_data_bpf(f, NULL);
1629         }
1630 }
1631 
1632 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1633 {
1634         struct packet_fanout *f;
1635 
1636         list_for_each_entry(f, &fanout_list, list) {
1637                 if (f->id == candidate_id &&
1638                     read_pnet(&f->net) == sock_net(sk)) {
1639                         return false;
1640                 }
1641         }
1642         return true;
1643 }
1644 
1645 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1646 {
1647         u16 id = fanout_next_id;
1648 
1649         do {
1650                 if (__fanout_id_is_free(sk, id)) {
1651                         *new_id = id;
1652                         fanout_next_id = id + 1;
1653                         return true;
1654                 }
1655 
1656                 id++;
1657         } while (id != fanout_next_id);
1658 
1659         return false;
1660 }
1661 
1662 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1663 {
1664         struct packet_rollover *rollover = NULL;
1665         struct packet_sock *po = pkt_sk(sk);
1666         struct packet_fanout *f, *match;
1667         u8 type = type_flags & 0xff;
1668         u8 flags = type_flags >> 8;
1669         int err;
1670 
1671         switch (type) {
1672         case PACKET_FANOUT_ROLLOVER:
1673                 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1674                         return -EINVAL;
                      /* fall through */
1675         case PACKET_FANOUT_HASH:
1676         case PACKET_FANOUT_LB:
1677         case PACKET_FANOUT_CPU:
1678         case PACKET_FANOUT_RND:
1679         case PACKET_FANOUT_QM:
1680         case PACKET_FANOUT_CBPF:
1681         case PACKET_FANOUT_EBPF:
1682                 break;
1683         default:
1684                 return -EINVAL;
1685         }
1686 
1687         mutex_lock(&fanout_mutex);
1688 
1689         err = -EALREADY;
1690         if (po->fanout)
1691                 goto out;
1692 
1693         if (type == PACKET_FANOUT_ROLLOVER ||
1694             (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1695                 err = -ENOMEM;
1696                 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1697                 if (!rollover)
1698                         goto out;
1699                 atomic_long_set(&rollover->num, 0);
1700                 atomic_long_set(&rollover->num_huge, 0);
1701                 atomic_long_set(&rollover->num_failed, 0);
1702                 po->rollover = rollover;
1703         }
1704 
1705         if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1706                 if (id != 0) {
1707                         err = -EINVAL;
1708                         goto out;
1709                 }
1710                 if (!fanout_find_new_id(sk, &id)) {
1711                         err = -ENOMEM;
1712                         goto out;
1713                 }
1714                 /* ephemeral flag for the first socket in the group: drop it */
1715                 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1716         }
1717 
1718         match = NULL;
1719         list_for_each_entry(f, &fanout_list, list) {
1720                 if (f->id == id &&
1721                     read_pnet(&f->net) == sock_net(sk)) {
1722                         match = f;
1723                         break;
1724                 }
1725         }
1726         err = -EINVAL;
1727         if (match && match->flags != flags)
1728                 goto out;
1729         if (!match) {
1730                 err = -ENOMEM;
1731                 match = kzalloc(sizeof(*match), GFP_KERNEL);
1732                 if (!match)
1733                         goto out;
1734                 write_pnet(&match->net, sock_net(sk));
1735                 match->id = id;
1736                 match->type = type;
1737                 match->flags = flags;
1738                 INIT_LIST_HEAD(&match->list);
1739                 spin_lock_init(&match->lock);
1740                 refcount_set(&match->sk_ref, 0);
1741                 fanout_init_data(match);
1742                 match->prot_hook.type = po->prot_hook.type;
1743                 match->prot_hook.dev = po->prot_hook.dev;
1744                 match->prot_hook.func = packet_rcv_fanout;
1745                 match->prot_hook.af_packet_priv = match;
1746                 match->prot_hook.id_match = match_fanout_group;
1747                 list_add(&match->list, &fanout_list);
1748         }
1749         err = -EINVAL;
1750 
1751         spin_lock(&po->bind_lock);
1752         if (po->running &&
1753             match->type == type &&
1754             match->prot_hook.type == po->prot_hook.type &&
1755             match->prot_hook.dev == po->prot_hook.dev) {
1756                 err = -ENOSPC;
1757                 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1758                         __dev_remove_pack(&po->prot_hook);
1759                         po->fanout = match;
1760                         refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1761                         __fanout_link(sk, po);
1762                         err = 0;
1763                 }
1764         }
1765         spin_unlock(&po->bind_lock);
1766 
1767         if (err && !refcount_read(&match->sk_ref)) {
1768                 list_del(&match->list);
1769                 kfree(match);
1770         }
1771 
1772 out:
1773         if (err && rollover) {
1774                 kfree_rcu(rollover, rcu);
1775                 po->rollover = NULL;
1776         }
1777         mutex_unlock(&fanout_mutex);
1778         return err;
1779 }
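
/*
 * Sketch (illustrative only) of the PACKET_FANOUT_FLAG_UNIQUEID branch
 * in fanout_add() above: passing id 0 with the flag set asks the kernel
 * to allocate a free id, which can be read back with getsockopt() and
 * handed to the other sockets that should join the same group. Error
 * handling is trimmed; bound_fd is assumed to be a bound AF_PACKET
 * socket.
 */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int fanout_uniqueid_example(int bound_fd)
{
	unsigned int arg = (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID)
			   << 16;	/* id 0 in the low 16 bits */
	unsigned int val;
	socklen_t len = sizeof(val);

	if (setsockopt(bound_fd, SOL_PACKET, PACKET_FANOUT,
		       &arg, sizeof(arg)) < 0)
		return -1;
	if (getsockopt(bound_fd, SOL_PACKET, PACKET_FANOUT, &val, &len) < 0)
		return -1;
	return val & 0xffff;	/* the id the kernel allocated */
}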
1780 
1781 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1782  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1783  * It is the responsibility of the caller to call fanout_release_data() and
1784  * free the returned packet_fanout (after synchronize_net())
1785  */
1786 static struct packet_fanout *fanout_release(struct sock *sk)
1787 {
1788         struct packet_sock *po = pkt_sk(sk);
1789         struct packet_fanout *f;
1790 
1791         mutex_lock(&fanout_mutex);
1792         f = po->fanout;
1793         if (f) {
1794                 po->fanout = NULL;
1795 
1796                 if (refcount_dec_and_test(&f->sk_ref))
1797                         list_del(&f->list);
1798                 else
1799                         f = NULL;
1800 
1801                 if (po->rollover) {
1802                         kfree_rcu(po->rollover, rcu);
1803                         po->rollover = NULL;
1804                 }
1805         }
1806         mutex_unlock(&fanout_mutex);
1807 
1808         return f;
1809 }
1810 
1811 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1812                                           struct sk_buff *skb)
1813 {
1814         /* Earlier code assumed this would be a VLAN pkt, double-check
1815          * this now that we have the actual packet in hand. We can only
1816          * do this check on Ethernet devices.
1817          */
1818         if (unlikely(dev->type != ARPHRD_ETHER))
1819                 return false;
1820 
1821         skb_reset_mac_header(skb);
1822         return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1823 }
1824 
1825 static const struct proto_ops packet_ops;
1826 
1827 static const struct proto_ops packet_ops_spkt;
1828 
1829 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1830                            struct packet_type *pt, struct net_device *orig_dev)
1831 {
1832         struct sock *sk;
1833         struct sockaddr_pkt *spkt;
1834 
1835         /*
1836          *      When we registered the protocol we saved the socket in the data
1837          *      field for just this event.
1838          */
1839 
1840         sk = pt->af_packet_priv;
1841 
1842         /*
1843          *      Yank back the headers [hope the device set this
1844          *      right or kerboom...]
1845          *
1846          *      Incoming packets have the ll header pulled;
1847          *      push it back.
1848          *
1849          *      For outgoing ones skb->data == skb_mac_header(skb),
1850          *      so this procedure is a no-op.
1851          */
1852 
1853         if (skb->pkt_type == PACKET_LOOPBACK)
1854                 goto out;
1855 
1856         if (!net_eq(dev_net(dev), sock_net(sk)))
1857                 goto out;
1858 
1859         skb = skb_share_check(skb, GFP_ATOMIC);
1860         if (skb == NULL)
1861                 goto oom;
1862 
1863         /* drop any routing info */
1864         skb_dst_drop(skb);
1865 
1866         /* drop conntrack reference */
1867         nf_reset(skb);
1868 
1869         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1870 
1871         skb_push(skb, skb->data - skb_mac_header(skb));
1872 
1873         /*
1874          *      The SOCK_PACKET socket receives _all_ frames.
1875          */
1876 
1877         spkt->spkt_family = dev->type;
1878         strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1879         spkt->spkt_protocol = skb->protocol;
1880 
1881         /*
1882          *      Charge the memory to the socket. This is done specifically
1883          *      to prevent sockets from using up all the memory.
1884          */
1885 
1886         if (sock_queue_rcv_skb(sk, skb) == 0)
1887                 return 0;
1888 
1889 out:
1890         kfree_skb(skb);
1891 oom:
1892         return 0;
1893 }
1894 
1895 
1896 /*
1897  *      Output a raw packet to the device layer. This bypasses all the other
1898  *      protocol layers and you must therefore supply it with a complete frame.
1899  */
1900 
1901 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1902                                size_t len)
1903 {
1904         struct sock *sk = sock->sk;
1905         DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1906         struct sk_buff *skb = NULL;
1907         struct net_device *dev;
1908         struct sockcm_cookie sockc;
1909         __be16 proto = 0;
1910         int err;
1911         int extra_len = 0;
1912 
1913         /*
1914          *      Get and verify the address.
1915          */
1916 
1917         if (saddr) {
1918                 if (msg->msg_namelen < sizeof(struct sockaddr))
1919                         return -EINVAL;
1920                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1921                         proto = saddr->spkt_protocol;
1922         } else
1923                 return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */
1924 
1925         /*
1926          *      Find the device first to size check it
1927          */
1928 
1929         saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1930 retry:
1931         rcu_read_lock();
1932         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1933         err = -ENODEV;
1934         if (dev == NULL)
1935                 goto out_unlock;
1936 
1937         err = -ENETDOWN;
1938         if (!(dev->flags & IFF_UP))
1939                 goto out_unlock;
1940 
1941         /*
1942          * You may not queue a frame bigger than the MTU. This is the lowest-level
1943          * raw protocol and you must do your own fragmentation at this level.
1944          */
1945 
1946         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1947                 if (!netif_supports_nofcs(dev)) {
1948                         err = -EPROTONOSUPPORT;
1949                         goto out_unlock;
1950                 }
1951                 extra_len = 4; /* We're doing our own CRC */
1952         }
1953 
1954         err = -EMSGSIZE;
1955         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1956                 goto out_unlock;
1957 
1958         if (!skb) {
1959                 size_t reserved = LL_RESERVED_SPACE(dev);
1960                 int tlen = dev->needed_tailroom;
1961                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1962 
1963                 rcu_read_unlock();
1964                 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1965                 if (skb == NULL)
1966                         return -ENOBUFS;
1967                 /* FIXME: Save some space for broken drivers that write a hard
1968                  * header at transmission time by themselves. PPP is the notable
1969                  * one here. This should really be fixed at the driver level.
1970                  */
1971                 skb_reserve(skb, reserved);
1972                 skb_reset_network_header(skb);
1973 
1974                 /* Try to align data part correctly */
1975                 if (hhlen) {
1976                         skb->data -= hhlen;
1977                         skb->tail -= hhlen;
1978                         if (len < hhlen)
1979                                 skb_reset_network_header(skb);
1980                 }
1981                 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1982                 if (err)
1983                         goto out_free;
1984                 goto retry;
1985         }
1986 
1987         if (!dev_validate_header(dev, skb->data, len)) {
1988                 err = -EINVAL;
1989                 goto out_unlock;
1990         }
1991         if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1992             !packet_extra_vlan_len_allowed(dev, skb)) {
1993                 err = -EMSGSIZE;
1994                 goto out_unlock;
1995         }
1996 
1997         sockc.tsflags = sk->sk_tsflags;
1998         if (msg->msg_controllen) {
1999                 err = sock_cmsg_send(sk, msg, &sockc);
2000                 if (unlikely(err))
2001                         goto out_unlock;
2002         }
2003 
2004         skb->protocol = proto;
2005         skb->dev = dev;
2006         skb->priority = sk->sk_priority;
2007         skb->mark = sk->sk_mark;
2008 
2009         sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
2010 
2011         if (unlikely(extra_len == 4))
2012                 skb->no_fcs = 1;
2013 
2014         skb_probe_transport_header(skb, 0);
2015 
2016         dev_queue_xmit(skb);
2017         rcu_read_unlock();
2018         return len;
2019 
2020 out_unlock:
2021         rcu_read_unlock();
2022 out_free:
2023         kfree_skb(skb);
2024         return err;
2025 }
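
/*
 * Sketch (illustrative only) of the legacy SOCK_PACKET transmit path
 * served by packet_sendmsg_spkt() above: the destination device is
 * named inside the sockaddr_pkt and the buffer must already be a
 * complete frame. "eth0", the experimental ethertype, and the dummy
 * 60-byte frame are assumptions for the example; the fd is leaked on
 * error for brevity.
 */
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static int spkt_send_example(void)
{
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
	unsigned char frame[60] = { 0 };	/* complete (dummy) frame */

	if (fd < 0)
		return -1;
	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
	spkt.spkt_protocol = htons(ETH_P_802_EX1);
	return sendto(fd, frame, sizeof(frame), 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}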
2026 
2027 static unsigned int run_filter(struct sk_buff *skb,
2028                                const struct sock *sk,
2029                                unsigned int res)
2030 {
2031         struct sk_filter *filter;
2032 
2033         rcu_read_lock();
2034         filter = rcu_dereference(sk->sk_filter);
2035         if (filter != NULL)
2036                 res = bpf_prog_run_clear_cb(filter->prog, skb);
2037         rcu_read_unlock();
2038 
2039         return res;
2040 }
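
/*
 * Sketch (illustrative only) of installing the per-socket classic BPF
 * filter that run_filter() above consults. This toy program accepts
 * IPv4 frames in full and drops everything else.
 */
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

static int attach_filter_example(int fd)
{
	struct sock_filter insns[] = {
		/* A = ethertype (halfword at offset 12 of the frame) */
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
		/* if (A == ETH_P_IP) accept the whole packet, else drop */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* snap everything */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog fprog = {
		.len    = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &fprog, sizeof(fprog));
}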
2041 
2042 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2043                            size_t *len)
2044 {
2045         struct virtio_net_hdr vnet_hdr;
2046 
2047         if (*len < sizeof(vnet_hdr))
2048                 return -EINVAL;
2049         *len -= sizeof(vnet_hdr);
2050 
2051         if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
2052                 return -EINVAL;
2053 
2054         return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2055 }
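
/*
 * Sketch (illustrative only) of the userspace counterpart of
 * packet_rcv_vnet() above: once PACKET_VNET_HDR is enabled on a
 * SOCK_RAW or SOCK_DGRAM packet socket, every datagram read from it is
 * prefixed with a struct virtio_net_hdr describing offload state.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/virtio_net.h>
#include <linux/if_packet.h>

static int vnet_hdr_recv_example(int fd, void *buf, size_t buflen)
{
	int one = 1;
	struct virtio_net_hdr *vh = buf;
	ssize_t n;

	if (setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR,
		       &one, sizeof(one)) < 0)
		return -1;
	n = recv(fd, buf, buflen, 0);
	if (n < (ssize_t)sizeof(*vh))
		return -1;
	/* the packet bytes follow the header in buf */
	return !!(vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
}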
2056 
2057 /*
2058  * This function does lazy skb cloning in the hope that most packets
2059  * are discarded by BPF.
2060  *
2061  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2062  * and skb->cb are mangled. It works because (and until) packets
2063  * falling here are owned by the current CPU. Output packets are cloned
2064  * by dev_queue_xmit_nit(), input packets are processed by net_bh
2065  * sequentially, so if we return the skb to its original state on exit,
2066  * we will not harm anyone.
2067  */
2068 
2069 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2070                       struct packet_type *pt, struct net_device *orig_dev)
2071 {
2072         struct sock *sk;
2073         struct sockaddr_ll *sll;
2074         struct packet_sock *po;
2075         u8 *skb_head = skb->data;
2076         int skb_len = skb->len;
2077         unsigned int snaplen, res;
2078         bool is_drop_n_account = false;
2079 
2080         if (skb->pkt_type == PACKET_LOOPBACK)
2081                 goto drop;
2082 
2083         sk = pt->af_packet_priv;
2084         po = pkt_sk(sk);
2085 
2086         if (!net_eq(dev_net(dev), sock_net(sk)))
2087                 goto drop;
2088 
2089         skb->dev = dev;
2090 
2091         if (dev->header_ops) {
2092                 /* The device has an explicit notion of ll header,
2093                  * exported to higher levels.
2094                  *
2095                  * Otherwise, the device hides details of its frame
2096                  * structure, so that the corresponding packet head is
2097                  * never delivered to the user.
2098                  */
2099                 if (sk->sk_type != SOCK_DGRAM)
2100                         skb_push(skb, skb->data - skb_mac_header(skb));
2101                 else if (skb->pkt_type == PACKET_OUTGOING) {
2102                         /* Special case: outgoing packets have ll header at head */
2103                         skb_pull(skb, skb_network_offset(skb));
2104                 }
2105         }
2106 
2107         snaplen = skb->len;
2108 
2109         res = run_filter(skb, sk, snaplen);
2110         if (!res)
2111                 goto drop_n_restore;
2112         if (snaplen > res)
2113                 snaplen = res;
2114 
2115         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2116                 goto drop_n_acct;
2117 
2118         if (skb_shared(skb)) {
2119                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2120                 if (nskb == NULL)
2121                         goto drop_n_acct;
2122 
2123                 if (skb_head != skb->data) {
2124                         skb->data = skb_head;
2125                         skb->len = skb_len;
2126                 }
2127                 consume_skb(skb);
2128                 skb = nskb;
2129         }
2130 
2131         sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2132 
2133         sll = &PACKET_SKB_CB(skb)->sa.ll;
2134         sll->sll_hatype = dev->type;
2135         sll->sll_pkttype = skb->pkt_type;
2136         if (unlikely(po->origdev))
2137                 sll->sll_ifindex = orig_dev->ifindex;
2138         else
2139                 sll->sll_ifindex = dev->ifindex;
2140 
2141         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2142 
2143         /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2144          * Use their space for storing the original skb length.
2145          */
2146         PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2147 
2148         if (pskb_trim(skb, snaplen))
2149                 goto drop_n_acct;
2150 
2151         skb_set_owner_r(skb, sk);
2152         skb->dev = NULL;
2153         skb_dst_drop(skb);
2154 
2155         /* drop conntrack reference */
2156         nf_reset(skb);
2157 
2158         spin_lock(&sk->sk_receive_queue.lock);
2159         po->stats.stats1.tp_packets++;
2160         sock_skb_set_dropcount(sk, skb);
2161         __skb_queue_tail(&sk->sk_receive_queue, skb);
2162         spin_unlock(&sk->sk_receive_queue.lock);
2163         sk->sk_data_ready(sk);
2164         return 0;
2165 
2166 drop_n_acct:
2167         is_drop_n_account = true;
2168         spin_lock(&sk->sk_receive_queue.lock);
2169         po->stats.stats1.tp_drops++;
2170         atomic_inc(&sk->sk_drops);
2171         spin_unlock(&sk->sk_receive_queue.lock);
2172 
2173 drop_n_restore:
2174         if (skb_head != skb->data && skb_shared(skb)) {
2175                 skb->data = skb_head;
2176                 skb->len = skb_len;
2177         }
2178 drop:
2179         if (!is_drop_n_account)
2180                 consume_skb(skb);
2181         else
2182                 kfree_skb(skb);
2183         return 0;
2184 }
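
/*
 * Sketch (illustrative only) of draining the queue that packet_rcv()
 * above feeds: recvfrom() returns one frame per call and fills in the
 * link-level address, including the sll_ifindex and sll_pkttype fields
 * set on the receive path.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void rcv_loop_example(int fd)
{
	unsigned char buf[2048];
	struct sockaddr_ll sll;
	socklen_t alen = sizeof(sll);
	ssize_t n;

	while ((n = recvfrom(fd, buf, sizeof(buf), 0,
			     (struct sockaddr *)&sll, &alen)) >= 0) {
		printf("%zd bytes, ifindex %d, pkttype %u\n",
		       n, sll.sll_ifindex, sll.sll_pkttype);
		alen = sizeof(sll);
	}
}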
2185 
2186 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2187                        struct packet_type *pt, struct net_device *orig_dev)
2188 {
2189         struct sock *sk;
2190         struct packet_sock *po;
2191         struct sockaddr_ll *sll;
2192         union tpacket_uhdr h;
2193         u8 *skb_head = skb->data;
2194         int skb_len = skb->len;
2195         unsigned int snaplen, res;
2196         unsigned long status = TP_STATUS_USER;
2197         unsigned short macoff, netoff, hdrlen;
2198         struct sk_buff *copy_skb = NULL;
2199         struct timespec ts;
2200         __u32 ts_status;
2201         bool is_drop_n_account = false;
2202         bool do_vnet = false;
2203 
2204         /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2205          * We may add members to them up to the current aligned size without forcing
2206          * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2207          */
2208         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2209         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2210 
2211         if (skb->pkt_type == PACKET_LOOPBACK)
2212                 goto drop;
2213 
2214         sk = pt->af_packet_priv;
2215         po = pkt_sk(sk);
2216 
2217         if (!net_eq(dev_net(dev), sock_net(sk)))
2218                 goto drop;
2219 
2220         if (dev->header_ops) {
2221                 if (sk->sk_type != SOCK_DGRAM)
2222                         skb_push(skb, skb->data - skb_mac_header(skb));
2223                 else if (skb->pkt_type == PACKET_OUTGOING) {
2224                         /* Special case: outgoing packets have ll header at head */
2225                         skb_pull(skb, skb_network_offset(skb));
2226                 }
2227         }
2228 
2229         snaplen = skb->len;
2230 
2231         res = run_filter(skb, sk, snaplen);
2232         if (!res)
2233                 goto drop_n_restore;
2234 
2235         if (skb->ip_summed == CHECKSUM_PARTIAL)
2236                 status |= TP_STATUS_CSUMNOTREADY;
2237         else if (skb->pkt_type != PACKET_OUTGOING &&
2238                  (skb->ip_summed == CHECKSUM_COMPLETE ||
2239                   skb_csum_unnecessary(skb)))
2240                 status |= TP_STATUS_CSUM_VALID;
2241 
2242         if (snaplen > res)
2243                 snaplen = res;
2244 
2245         if (sk->sk_type == SOCK_DGRAM) {
2246                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2247                                   po->tp_reserve;
2248         } else {
2249                 unsigned int maclen = skb_network_offset(skb);
2250                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2251                                        (maclen < 16 ? 16 : maclen)) +
2252                                        po->tp_reserve;
2253                 if (po->has_vnet_hdr) {
2254                         netoff += sizeof(struct virtio_net_hdr);
2255                         do_vnet = true;
2256                 }
2257                 macoff = netoff - maclen;
2258         }
2259         if (po->tp_version <= TPACKET_V2) {
2260                 if (macoff + snaplen > po->rx_ring.frame_size) {
2261                         if (po->copy_thresh &&
2262                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2263                                 if (skb_shared(skb)) {
2264                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
2265                                 } else {
2266                                         copy_skb = skb_get(skb);
2267                                         skb_head = skb->data;
2268                                 }
2269                                 if (copy_skb)
2270                                         skb_set_owner_r(copy_skb, sk);
2271                         }
2272                         snaplen = po->rx_ring.frame_size - macoff;
2273                         if ((int)snaplen < 0) {
2274                                 snaplen = 0;
2275                                 do_vnet = false;
2276                         }
2277                 }
2278         } else if (unlikely(macoff + snaplen >
2279                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2280                 u32 nval;
2281 
2282                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2283                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2284                             snaplen, nval, macoff);
2285                 snaplen = nval;
2286                 if (unlikely((int)snaplen < 0)) {
2287                         snaplen = 0;
2288                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2289                         do_vnet = false;
2290                 }
2291         }
2292         spin_lock(&sk->sk_receive_queue.lock);
2293         h.raw = packet_current_rx_frame(po, skb,
2294                                         TP_STATUS_KERNEL, (macoff+snaplen));
2295         if (!h.raw)
2296                 goto drop_n_account;
2297         if (po->tp_version <= TPACKET_V2) {
2298                 packet_increment_rx_head(po, &po->rx_ring);
2299                 /*
2300                  * LOSING will be reported till you read the stats,
2301                  * because it's COR - Clear On Read.
2302                  * Anyway, moving it for V1/V2 only, as V3 doesn't need this
2303                  * at the packet level.
2304                  */
2305                 if (po->stats.stats1.tp_drops)
2306                         status |= TP_STATUS_LOSING;
2307         }
2308         po->stats.stats1.tp_packets++;
2309         if (copy_skb) {
2310                 status |= TP_STATUS_COPY;
2311                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2312         }
2313         spin_unlock(&sk->sk_receive_queue.lock);
2314 
2315         if (do_vnet) {
2316                 if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2317                                             sizeof(struct virtio_net_hdr),
2318                                             vio_le(), true)) {
2319                         spin_lock(&sk->sk_receive_queue.lock);
2320                         goto drop_n_account;
2321                 }
2322         }
2323 
2324         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2325 
2326         if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2327                 getnstimeofday(&ts);
2328 
2329         status |= ts_status;
2330 
2331         switch (po->tp_version) {
2332         case TPACKET_V1:
2333                 h.h1->tp_len = skb->len;
2334                 h.h1->tp_snaplen = snaplen;
2335                 h.h1->tp_mac = macoff;
2336                 h.h1->tp_net = netoff;
2337                 h.h1->tp_sec = ts.tv_sec;
2338                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2339                 hdrlen = sizeof(*h.h1);
2340                 break;
2341         case TPACKET_V2:
2342                 h.h2->tp_len = skb->len;
2343                 h.h2->tp_snaplen = snaplen;
2344                 h.h2->tp_mac = macoff;
2345                 h.h2->tp_net = netoff;
2346                 h.h2->tp_sec = ts.tv_sec;
2347                 h.h2->tp_nsec = ts.tv_nsec;
2348                 if (skb_vlan_tag_present(skb)) {
2349                         h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2350                         h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2351                         status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2352                 } else {
2353                         h.h2->tp_vlan_tci = 0;
2354                         h.h2->tp_vlan_tpid = 0;
2355                 }
2356                 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2357                 hdrlen = sizeof(*h.h2);
2358                 break;
2359         case TPACKET_V3:
2360                 /* tp_next_offset and the vlan fields are already populated
2361                  * above, so DON'T clear those fields here.
2362                  */
2363                 h.h3->tp_status |= status;
2364                 h.h3->tp_len = skb->len;
2365                 h.h3->tp_snaplen = snaplen;
2366                 h.h3->tp_mac = macoff;
2367                 h.h3->tp_net = netoff;
2368                 h.h3->tp_sec  = ts.tv_sec;
2369                 h.h3->tp_nsec = ts.tv_nsec;
2370                 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2371                 hdrlen = sizeof(*h.h3);
2372                 break;
2373         default:
2374                 BUG();
2375         }
2376 
2377         sll = h.raw + TPACKET_ALIGN(hdrlen);
2378         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2379         sll->sll_family = AF_PACKET;
2380         sll->sll_hatype = dev->type;
2381         sll->sll_protocol = skb->protocol;
2382         sll->sll_pkttype = skb->pkt_type;
2383         if (unlikely(po->origdev))
2384                 sll->sll_ifindex = orig_dev->ifindex;
2385         else
2386                 sll->sll_ifindex = dev->ifindex;
2387 
2388         smp_mb();
2389 
2390 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2391         if (po->tp_version <= TPACKET_V2) {
2392                 u8 *start, *end;
2393 
2394                 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2395                                         macoff + snaplen);
2396 
2397                 for (start = h.raw; start < end; start += PAGE_SIZE)
2398                         flush_dcache_page(pgv_to_page(start));
2399         }
2400         smp_wmb();
2401 #endif
2402 
2403         if (po->tp_version <= TPACKET_V2) {
2404                 __packet_set_status(po, h.raw, status);
2405                 sk->sk_data_ready(sk);
2406         } else {
2407                 prb_clear_blk_fill_status(&po->rx_ring);
2408         }
2409 
2410 drop_n_restore:
2411         if (skb_head != skb->data && skb_shared(skb)) {
2412                 skb->data = skb_head;
2413                 skb->len = skb_len;
2414         }
2415 drop:
2416         if (!is_drop_n_account)
2417                 consume_skb(skb);
2418         else
2419                 kfree_skb(skb);
2420         return 0;
2421 
2422 drop_n_account:
2423         is_drop_n_account = true;
2424         po->stats.stats1.tp_drops++;
2425         spin_unlock(&sk->sk_receive_queue.lock);
2426 
2427         sk->sk_data_ready(sk);
2428         kfree_skb(copy_skb);
2429         goto drop_n_restore;
2430 }
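
/*
 * Sketch (illustrative only) of the userspace consumer that matches
 * tpacket_rcv() above for TPACKET_V2: map the ring, wait for frames
 * whose tp_status carries TP_STATUS_USER, then hand each slot back as
 * TP_STATUS_KERNEL. The ring geometry is an arbitrary assumption, fd is
 * assumed to be bound already, and memory barriers are omitted.
 */
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int rx_ring_example(int fd)
{
	struct tpacket_req req = {
		.tp_block_size = 4096,
		.tp_block_nr   = 64,
		.tp_frame_size = 2048,
		.tp_frame_nr   = 128,	/* (block_size / frame_size) * block_nr */
	};
	int ver = TPACKET_V2;
	unsigned int i = 0;
	void *ring;

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) ||
	    setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)))
		return -1;
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return -1;

	for (;;) {
		struct tpacket2_hdr *hdr =
			(void *)((char *)ring + i * req.tp_frame_size);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			poll(&pfd, 1, -1);
			continue;
		}
		/* frame data starts at (char *)hdr + hdr->tp_mac and is
		 * hdr->tp_snaplen bytes long
		 */
		hdr->tp_status = TP_STATUS_KERNEL;	/* return the slot */
		i = (i + 1) % req.tp_frame_nr;
	}
}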
2431 
2432 static void tpacket_destruct_skb(struct sk_buff *skb)
2433 {
2434         struct packet_sock *po = pkt_sk(skb->sk);
2435 
2436         if (likely(po->tx_ring.pg_vec)) {
2437                 void *ph;
2438                 __u32 ts;
2439 
2440                 ph = skb_shinfo(skb)->destructor_arg;
2441                 packet_dec_pending(&po->tx_ring);
2442 
2443                 ts = __packet_set_timestamp(po, ph, skb);
2444                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2445         }
2446 
2447         sock_wfree(skb);
2448 }
2449 
2450 static void tpacket_set_protocol(const struct net_device *dev,
2451                                  struct sk_buff *skb)
2452 {
2453         if (dev->type == ARPHRD_ETHER) {
2454                 skb_reset_mac_header(skb);
2455                 skb->protocol = eth_hdr(skb)->h_proto;
2456         }
2457 }
2458 
2459 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2460 {
2461         if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2462             (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2463              __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2464               __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2465                 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2466                          __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2467                         __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2468 
2469         if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2470                 return -EINVAL;
2471 
2472         return 0;
2473 }
2474 
2475 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2476                                  struct virtio_net_hdr *vnet_hdr)
2477 {
2478         if (*len < sizeof(*vnet_hdr))
2479                 return -EINVAL;
2480         *len -= sizeof(*vnet_hdr);
2481 
2482         if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2483                 return -EFAULT;
2484 
2485         return __packet_snd_vnet_parse(vnet_hdr, *len);
2486 }
2487 
2488 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2489                 void *frame, struct net_device *dev, void *data, int tp_len,
2490                 __be16 proto, unsigned char *addr, int hlen, int copylen,
2491                 const struct sockcm_cookie *sockc)
2492 {
2493         union tpacket_uhdr ph;
2494         int to_write, offset, len, nr_frags, len_max;
2495         struct socket *sock = po->sk.sk_socket;
2496         struct page *page;
2497         int err;
2498 
2499         ph.raw = frame;
2500 
2501         skb->protocol = proto;
2502         skb->dev = dev;
2503         skb->priority = po->sk.sk_priority;
2504         skb->mark = po->sk.sk_mark;
2505         sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
2506         skb_shinfo(skb)->destructor_arg = ph.raw;
2507 
2508         skb_reserve(skb, hlen);
2509         skb_reset_network_header(skb);
2510 
2511         to_write = tp_len;
2512 
2513         if (sock->type == SOCK_DGRAM) {
2514                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2515                                 NULL, tp_len);
2516                 if (unlikely(err < 0))
2517                         return -EINVAL;
2518         } else if (copylen) {
2519                 int hdrlen = min_t(int, copylen, tp_len);
2520 
2521                 skb_push(skb, dev->hard_header_len);
2522                 skb_put(skb, copylen - dev->hard_header_len);
2523                 err = skb_store_bits(skb, 0, data, hdrlen);
2524                 if (unlikely(err))
2525                         return err;
2526                 if (!dev_validate_header(dev, skb->data, hdrlen))
2527                         return -EINVAL;
2528                 if (!skb->protocol)
2529                         tpacket_set_protocol(dev, skb);
2530 
2531                 data += hdrlen;
2532                 to_write -= hdrlen;
2533         }
2534 
2535         offset = offset_in_page(data);
2536         len_max = PAGE_SIZE - offset;
2537         len = ((to_write > len_max) ? len_max : to_write);
2538 
2539         skb->data_len = to_write;
2540         skb->len += to_write;
2541         skb->truesize += to_write;
2542         refcount_add(to_write, &po->sk.sk_wmem_alloc);
2543 
2544         while (likely(to_write)) {
2545                 nr_frags = skb_shinfo(skb)->nr_frags;
2546 
2547                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2548                         pr_err("Packet exceeds the number of skb frags (%lu)\n",
2549                                MAX_SKB_FRAGS);
2550                         return -EFAULT;
2551                 }
2552 
2553                 page = pgv_to_page(data);
2554                 data += len;
2555                 flush_dcache_page(page);
2556                 get_page(page);
2557                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2558                 to_write -= len;
2559                 offset = 0;
2560                 len_max = PAGE_SIZE;
2561                 len = ((to_write > len_max) ? len_max : to_write);
2562         }
2563 
2564         skb_probe_transport_header(skb, 0);
2565 
2566         return tp_len;
2567 }
2568 
2569 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2570                                 int size_max, void **data)
2571 {
2572         union tpacket_uhdr ph;
2573         int tp_len, off;
2574 
2575         ph.raw = frame;
2576 
2577         switch (po->tp_version) {
2578         case TPACKET_V3:
2579                 if (ph.h3->tp_next_offset != 0) {
2580                         pr_warn_once("variable-sized slot not supported");
2581                         return -EINVAL;
2582                 }
2583                 tp_len = ph.h3->tp_len;
2584                 break;
2585         case TPACKET_V2:
2586                 tp_len = ph.h2->tp_len;
2587                 break;
2588         default:
2589                 tp_len = ph.h1->tp_len;
2590                 break;
2591         }
2592         if (unlikely(tp_len > size_max)) {
2593                 pr_err("packet size is too large (%d > %d)\n", tp_len, size_max);
2594                 return -EMSGSIZE;
2595         }
2596 
2597         if (unlikely(po->tp_tx_has_off)) {
2598                 int off_min, off_max;
2599 
2600                 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2601                 off_max = po->tx_ring.frame_size - tp_len;
2602                 if (po->sk.sk_type == SOCK_DGRAM) {
2603                         switch (po->tp_version) {
2604                         case TPACKET_V3:
2605                                 off = ph.h3->tp_net;
2606                                 break;
2607                         case TPACKET_V2:
2608                                 off = ph.h2->tp_net;
2609                                 break;
2610                         default:
2611                                 off = ph.h1->tp_net;
2612                                 break;
2613                         }
2614                 } else {
2615                         switch (po->tp_version) {
2616                         case TPACKET_V3:
2617                                 off = ph.h3->tp_mac;
2618                                 break;
2619                         case TPACKET_V2:
2620                                 off = ph.h2->tp_mac;
2621                                 break;
2622                         default:
2623                                 off = ph.h1->tp_mac;
2624                                 break;
2625                         }
2626                 }
2627                 if (unlikely((off < off_min) || (off_max < off)))
2628                         return -EINVAL;
2629         } else {
2630                 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2631         }
2632 
2633         *data = frame + off;
2634         return tp_len;
2635 }
2636 
2637 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2638 {
2639         struct sk_buff *skb;
2640         struct net_device *dev;
2641         struct virtio_net_hdr *vnet_hdr = NULL;
2642         struct sockcm_cookie sockc;
2643         __be16 proto;
2644         int err, reserve = 0;
2645         void *ph;
2646         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2647         bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2648         int tp_len, size_max;
2649         unsigned char *addr;
2650         void *data;
2651         int len_sum = 0;
2652         int status = TP_STATUS_AVAILABLE;
2653         int hlen, tlen, copylen = 0;
2654 
2655         mutex_lock(&po->pg_vec_lock);
2656 
2657         if (likely(saddr == NULL)) {
2658                 dev     = packet_cached_dev_get(po);
2659                 proto   = po->num;
2660                 addr    = NULL;
2661         } else {
2662                 err = -EINVAL;
2663                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2664                         goto out;
2665                 if (msg->msg_namelen < (saddr->sll_halen
2666                                         + offsetof(struct sockaddr_ll,
2667                                                 sll_addr)))
2668                         goto out;
2669                 proto   = saddr->sll_protocol;
2670                 addr    = saddr->sll_addr;
2671                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2672         }
2673 
2674         err = -ENXIO;
2675         if (unlikely(dev == NULL))
2676                 goto out;
2677         err = -ENETDOWN;
2678         if (unlikely(!(dev->flags & IFF_UP)))
2679                 goto out_put;
2680 
2681         sockc.tsflags = po->sk.sk_tsflags;
2682         if (msg->msg_controllen) {
2683                 err = sock_cmsg_send(&po->sk, msg, &sockc);
2684                 if (unlikely(err))
2685                         goto out_put;
2686         }
2687 
2688         if (po->sk.sk_socket->type == SOCK_RAW)
2689                 reserve = dev->hard_header_len;
2690         size_max = po->tx_ring.frame_size
2691                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2692 
2693         if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2694                 size_max = dev->mtu + reserve + VLAN_HLEN;
2695 
2696         do {
2697                 ph = packet_current_frame(po, &po->tx_ring,
2698                                           TP_STATUS_SEND_REQUEST);
2699                 if (unlikely(ph == NULL)) {
2700                         if (need_wait && need_resched())
2701                                 schedule();
2702                         continue;
2703                 }
2704 
2705                 skb = NULL;
2706                 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2707                 if (tp_len < 0)
2708                         goto tpacket_error;
2709 
2710                 status = TP_STATUS_SEND_REQUEST;
2711                 hlen = LL_RESERVED_SPACE(dev);
2712                 tlen = dev->needed_tailroom;
2713                 if (po->has_vnet_hdr) {
2714                         vnet_hdr = data;
2715                         data += sizeof(*vnet_hdr);
2716                         tp_len -= sizeof(*vnet_hdr);
2717                         if (tp_len < 0 ||
2718                             __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2719                                 tp_len = -EINVAL;
2720                                 goto tpacket_error;
2721                         }
2722                         copylen = __virtio16_to_cpu(vio_le(),
2723                                                     vnet_hdr->hdr_len);
2724                 }
2725                 copylen = max_t(int, copylen, dev->hard_header_len);
2726                 skb = sock_alloc_send_skb(&po->sk,
2727                                 hlen + tlen + sizeof(struct sockaddr_ll) +
2728                                 (copylen - dev->hard_header_len),
2729                                 !need_wait, &err);
2730 
2731                 if (unlikely(skb == NULL)) {
2732                         /* we assume the socket was initially writable ... */
2733                         if (likely(len_sum > 0))
2734                                 err = len_sum;
2735                         goto out_status;
2736                 }
2737                 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2738                                           addr, hlen, copylen, &sockc);
2739                 if (likely(tp_len >= 0) &&
2740                     tp_len > dev->mtu + reserve &&
2741                     !po->has_vnet_hdr &&
2742                     !packet_extra_vlan_len_allowed(dev, skb))
2743                         tp_len = -EMSGSIZE;
2744 
2745                 if (unlikely(tp_len < 0)) {
2746 tpacket_error:
2747                         if (po->tp_loss) {
2748                                 __packet_set_status(po, ph,
2749                                                 TP_STATUS_AVAILABLE);
2750                                 packet_increment_head(&po->tx_ring);
2751                                 kfree_skb(skb);
2752                                 continue;
2753                         } else {
2754                                 status = TP_STATUS_WRONG_FORMAT;
2755                                 err = tp_len;
2756                                 goto out_status;
2757                         }
2758                 }
2759 
2760                 if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
2761                                                               vio_le())) {
2762                         tp_len = -EINVAL;
2763                         goto tpacket_error;
2764                 }
2765 
2766                 skb->destructor = tpacket_destruct_skb;
2767                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2768                 packet_inc_pending(&po->tx_ring);
2769 
2770                 status = TP_STATUS_SEND_REQUEST;
2771                 err = po->xmit(skb);
2772                 if (unlikely(err > 0)) {
2773                         err = net_xmit_errno(err);
2774                         if (err && __packet_get_status(po, ph) ==
2775                                    TP_STATUS_AVAILABLE) {
2776                                 /* skb was destructed already */
2777                                 skb = NULL;
2778                                 goto out_status;
2779                         }
2780                         /*
2781                          * skb was dropped but not destructed yet;
2782                          * let's treat it like congestion or err < 0
2783                          */
2784                         err = 0;
2785                 }
2786                 packet_increment_head(&po->tx_ring);
2787                 len_sum += tp_len;
2788         } while (likely((ph != NULL) ||
2789                 /* Note: packet_read_pending() might be slow if we have
2790                  * to call it, as it's a per-CPU variable, but in the
2791                  * fast path we already short-circuit the loop with the
2792                  * first condition and luckily don't have to go down
2793                  * that path anyway.
2794                  */
2795                  (need_wait && packet_read_pending(&po->tx_ring))));
2796 
2797         err = len_sum;
2798         goto out_put;
2799 
2800 out_status:
2801         __packet_set_status(po, ph, status);
2802         kfree_skb(skb);
2803 out_put:
2804         dev_put(dev);
2805 out:
2806         mutex_unlock(&po->pg_vec_lock);
2807         return err;
2808 }
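
/*
 * Sketch (illustrative only) of the userspace producer side served by
 * tpacket_snd() above: copy a frame into an available TX ring slot,
 * mark it TP_STATUS_SEND_REQUEST, and kick the kernel with send(). Ring
 * setup (PACKET_TX_RING + mmap) is assumed to have been done as in the
 * RX example, and frame_slot is assumed to point at a TPACKET_V2 slot.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int tx_ring_send_example(int fd, void *frame_slot,
				const void *pkt, unsigned int len)
{
	struct tpacket2_hdr *hdr = frame_slot;

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;	/* slot still owned by the kernel */
	/* without PACKET_TX_HAS_OFF, data sits right after the header */
	memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
	       pkt, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;
	/* one send() flushes every frame marked TP_STATUS_SEND_REQUEST */
	return send(fd, NULL, 0, 0);
}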
2809 
2810 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2811                                         size_t reserve, size_t len,
2812                                         size_t linear, int noblock,
2813                                         int *err)
2814 {
2815         struct sk_buff *skb;
2816 
2817         /* Under a page?  Don't bother with paged skb. */
2818         if (prepad + len < PAGE_SIZE || !linear)
2819                 linear = len;
2820 
2821         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2822                                    err, 0);
2823         if (!skb)
2824                 return NULL;
2825 
2826         skb_reserve(skb, reserve);
2827         skb_put(skb, linear);
2828         skb->data_len = len - linear;
2829         skb->len += len - linear;
2830 
2831         return skb;
2832 }
2833 
2834 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2835 {
2836         struct sock *sk = sock->sk;
2837         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2838         struct sk_buff *skb;
2839         struct net_device *dev;
2840         __be16 proto;
2841         unsigned char *addr;
2842         int err, reserve = 0;
2843         struct sockcm_cookie sockc;
2844         struct virtio_net_hdr vnet_hdr = { 0 };
2845         int offset = 0;
2846         struct packet_sock *po = pkt_sk(sk);
2847         bool has_vnet_hdr = false;
2848         int hlen, tlen, linear;
2849         int extra_len = 0;
2850 
2851         /*
2852          *      Get and verify the address.
2853          */
2854 
2855         if (likely(saddr == NULL)) {
2856                 dev     = packet_cached_dev_get(po);
2857                 proto   = po->num;
2858                 addr    = NULL;
2859         } else {
2860                 err = -EINVAL;
2861                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2862                         goto out;
2863                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2864                         goto out;
2865                 proto   = saddr->sll_protocol;
2866                 addr    = saddr->sll_addr;
2867                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2868         }
2869 
2870         err = -ENXIO;
2871         if (unlikely(dev == NULL))
2872                 goto out_unlock;
2873         err = -ENETDOWN;
2874         if (unlikely(!(dev->flags & IFF_UP)))
2875                 goto out_unlock;
2876 
2877         sockc.tsflags = sk->sk_tsflags;
2878         sockc.mark = sk->sk_mark;
2879         if (msg->msg_controllen) {
2880                 err = sock_cmsg_send(sk, msg, &sockc);
2881                 if (unlikely(err))
2882                         goto out_unlock;
2883         }
2884 
2885         if (sock->type == SOCK_RAW)
2886                 reserve = dev->hard_header_len;
2887         if (po->has_vnet_hdr) {
2888                 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2889                 if (err)
2890                         goto out_unlock;
2891                 has_vnet_hdr = true;
2892         }
2893 
2894         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2895                 if (!netif_supports_nofcs(dev)) {
2896                         err = -EPROTONOSUPPORT;
2897                         goto out_unlock;
2898                 }
2899                 extra_len = 4; /* We're doing our own CRC */
2900         }
2901 
2902         err = -EMSGSIZE;
2903         if (!vnet_hdr.gso_type &&
2904             (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2905                 goto out_unlock;
2906 
2907         err = -ENOBUFS;
2908         hlen = LL_RESERVED_SPACE(dev);
2909         tlen = dev->needed_tailroom;
2910         linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2911         linear = max(linear, min_t(int, len, dev->hard_header_len));
2912         skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2913                                msg->msg_flags & MSG_DONTWAIT, &err);
2914         if (skb == NULL)
2915                 goto out_unlock;
2916 
2917         skb_set_network_header(skb, reserve);
2918 
2919         err = -EINVAL;
2920         if (sock->type == SOCK_DGRAM) {
2921                 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2922                 if (unlikely(offset < 0))
2923                         goto out_free;
2924         }
2925 
2926         /* Returns -EFAULT on error */
2927         err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2928         if (err)
2929                 goto out_free;
2930 
2931         if (sock->type == SOCK_RAW &&
2932             !dev_validate_header(dev, skb->data, len)) {
2933                 err = -EINVAL;
2934                 goto out_free;
2935         }
2936 
2937         sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
2938 
2939         if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2940             !packet_extra_vlan_len_allowed(dev, skb)) {
2941                 err = -EMSGSIZE;
2942                 goto out_free;
2943         }
2944 
2945         skb->protocol = proto;
2946         skb->dev = dev;
2947         skb->priority = sk->sk_priority;
2948         skb->mark = sockc.mark;
2949 
2950         if (has_vnet_hdr) {
2951                 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2952                 if (err)
2953                         goto out_free;
2954                 len += sizeof(vnet_hdr);
2955         }
2956 
2957         skb_probe_transport_header(skb, reserve);
2958 
2959         if (unlikely(extra_len == 4))
2960                 skb->no_fcs = 1;
2961 
2962         err = po->xmit(skb);
2963         if (err > 0 && (err = net_xmit_errno(err)) != 0)
2964                 goto out_unlock;
2965 
2966         dev_put(dev);
2967 
2968         return len;
2969 
2970 out_free:
2971         kfree_skb(skb);
2972 out_unlock:
2973         if (dev)
2974                 dev_put(dev);
2975 out:
2976         return err;
2977 }
2978 
2979 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2980 {
2981         struct sock *sk = sock->sk;
2982         struct packet_sock *po = pkt_sk(sk);
2983 
2984         if (po->tx_ring.pg_vec)
2985                 return tpacket_snd(po, msg);
2986         else
2987                 return packet_snd(sock, msg, len);
2988 }
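
/*
 * Example (userspace, illustrative, not part of this file): a minimal
 * sketch of the non-ring transmit path above (packet_snd()). The
 * interface name "eth0" and the mostly-zero frame are assumptions for
 * illustration only. With SOCK_RAW the caller supplies the complete
 * link-layer header.
 *
 *   #include <arpa/inet.h>
 *   #include <linux/if_ether.h>
 *   #include <linux/if_packet.h>
 *   #include <net/if.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   static int send_one_frame(void)
 *   {
 *           int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *           struct sockaddr_ll sll = {
 *                   .sll_family   = AF_PACKET,
 *                   .sll_protocol = htons(ETH_P_ALL),
 *                   .sll_ifindex  = if_nametoindex("eth0"),
 *           };
 *           unsigned char frame[ETH_ZLEN] = { 0 };
 *
 *           if (fd < 0)
 *                   return -1;
 *           memset(frame, 0xff, ETH_ALEN);  // bytes 0..5: broadcast dst
 *           // bytes 6..11: source MAC, 12..13: ethertype, then payload
 *           return sendto(fd, frame, sizeof(frame), 0,
 *                         (struct sockaddr *)&sll, sizeof(sll)) < 0 ? -1 : 0;
 *   }
 */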
2989 
2990 /*
2991  *      Close a PACKET socket. This is fairly simple. We immediately go
2992  *      to 'closed' state and remove our protocol entry from the device list.
2993  */
2994 
2995 static int packet_release(struct socket *sock)
2996 {
2997         struct sock *sk = sock->sk;
2998         struct packet_sock *po;
2999         struct packet_fanout *f;
3000         struct net *net;
3001         union tpacket_req_u req_u;
3002 
3003         if (!sk)
3004                 return 0;
3005 
3006         net = sock_net(sk);
3007         po = pkt_sk(sk);
3008 
3009         mutex_lock(&net->packet.sklist_lock);
3010         sk_del_node_init_rcu(sk);
3011         mutex_unlock(&net->packet.sklist_lock);
3012 
3013         preempt_disable();
3014         sock_prot_inuse_add(net, sk->sk_prot, -1);
3015         preempt_enable();
3016 
3017         spin_lock(&po->bind_lock);
3018         unregister_prot_hook(sk, false);
3019         packet_cached_dev_reset(po);
3020 
3021         if (po->prot_hook.dev) {
3022                 dev_put(po->prot_hook.dev);
3023                 po->prot_hook.dev = NULL;
3024         }
3025         spin_unlock(&po->bind_lock);
3026 
3027         packet_flush_mclist(sk);
3028 
3029         if (po->rx_ring.pg_vec) {
3030                 memset(&req_u, 0, sizeof(req_u));
3031                 packet_set_ring(sk, &req_u, 1, 0);
3032         }
3033 
3034         if (po->tx_ring.pg_vec) {
3035                 memset(&req_u, 0, sizeof(req_u));
3036                 packet_set_ring(sk, &req_u, 1, 1);
3037         }
3038 
3039         f = fanout_release(sk);
3040 
3041         synchronize_net();
3042 
3043         if (f) {
3044                 fanout_release_data(f);
3045                 kfree(f);
3046         }
3047         /*
3048          *      Now the socket is dead. No more input will appear.
3049          */
3050         sock_orphan(sk);
3051         sock->sk = NULL;
3052 
3053         /* Purge queues */
3054 
3055         skb_queue_purge(&sk->sk_receive_queue);
3056         packet_free_pending(po);
3057         sk_refcnt_debug_release(sk);
3058 
3059         sock_put(sk);
3060         return 0;
3061 }
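
/*
 * Note on the ordering above: the protocol hook is unregistered and the
 * fanout group unlinked before synchronize_net(), and the group is only
 * kfree()d afterwards, so no concurrent receive path can still be
 * walking the fanout state when it is freed.
 */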
3062 
3063 /*
3064  *      Attach a packet hook.
3065  */
3066 
3067 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3068                           __be16 proto)
3069 {
3070         struct packet_sock *po = pkt_sk(sk);
3071         struct net_device *dev_curr;
3072         __be16 proto_curr;
3073         bool need_rehook;
3074         struct net_device *dev = NULL;
3075         int ret = 0;
3076         bool unlisted = false;
3077 
3078         lock_sock(sk);
3079         spin_lock(&po->bind_lock);
3080         rcu_read_lock();
3081 
3082         if (po->fanout) {
3083                 ret = -EINVAL;
3084                 goto out_unlock;
3085         }
3086 
3087         if (name) {
3088                 dev = dev_get_by_name_rcu(sock_net(sk), name);
3089                 if (!dev) {
3090                         ret = -ENODEV;
3091                         goto out_unlock;
3092                 }
3093         } else if (ifindex) {
3094                 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3095                 if (!dev) {
3096                         ret = -ENODEV;
3097                         goto out_unlock;
3098                 }
3099         }
3100 
3101         if (dev)
3102                 dev_hold(dev);
3103 
3104         proto_curr = po->prot_hook.type;
3105         dev_curr = po->prot_hook.dev;
3106 
3107         need_rehook = proto_curr != proto || dev_curr != dev;
3108 
3109         if (need_rehook) {
3110                 if (po->running) {
3111                         rcu_read_unlock();
3112                         __unregister_prot_hook(sk, true);
3113                         rcu_read_lock();
3114                         dev_curr = po->prot_hook.dev;
3115                         if (dev)
3116                                 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3117                                                                  dev->ifindex);
3118                 }
3119 
3120                 po->num = proto;
3121                 po->prot_hook.type = proto;
3122 
3123                 if (unlikely(unlisted)) {
3124                         dev_put(dev);
3125                         po->prot_hook.dev = NULL;
3126                         po->ifindex = -1;
3127                         packet_cached_dev_reset(po);
3128                 } else {
3129                         po->prot_hook.dev = dev;
3130                         po->ifindex = dev ? dev->ifindex : 0;
3131                         packet_cached_dev_assign(po, dev);
3132                 }
3133         }
3134         if (dev_curr)
3135                 dev_put(dev_curr);
3136 
3137         if (proto == 0 || !need_rehook)
3138                 goto out_unlock;
3139 
3140         if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3141                 register_prot_hook(sk);
3142         } else {
3143                 sk->sk_err = ENETDOWN;
3144                 if (!sock_flag(sk, SOCK_DEAD))
3145                         sk->sk_error_report(sk);
3146         }
3147 
3148 out_unlock:
3149         rcu_read_unlock();
3150         spin_unlock(&po->bind_lock);
3151         release_sock(sk);
3152         return ret;
3153 }
3154 
3155 /*
3156  *      Bind a packet socket to a device
3157  */
3158 
3159 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3160                             int addr_len)
3161 {
3162         struct sock *sk = sock->sk;
3163         char name[sizeof(uaddr->sa_data) + 1];
3164 
3165         /*
3166          *      Check legality
3167          */
3168 
3169         if (addr_len != sizeof(struct sockaddr))
3170                 return -EINVAL;
3171         /* uaddr->sa_data comes from user space and is not guaranteed
3172          * to be NUL-terminated.
3173          */
3174         memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3175         name[sizeof(uaddr->sa_data)] = 0;
3176 
3177         return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3178 }
3179 
3180 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3181 {
3182         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3183         struct sock *sk = sock->sk;
3184 
3185         /*
3186          *      Check legality
3187          */
3188 
3189         if (addr_len < sizeof(struct sockaddr_ll))
3190                 return -EINVAL;
3191         if (sll->sll_family != AF_PACKET)
3192                 return -EINVAL;
3193 
3194         return packet_do_bind(sk, NULL, sll->sll_ifindex,
3195                               sll->sll_protocol ? : pkt_sk(sk)->num);
3196 }
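
/*
 * Example (userspace, illustrative): binding a packet socket to a
 * single interface through the sockaddr_ll path above. The name "eth0"
 * is an assumption; passing 0 as sll_protocol keeps the protocol the
 * socket already has, as packet_bind() shows.
 *
 *   #include <arpa/inet.h>
 *   #include <linux/if_ether.h>
 *   #include <linux/if_packet.h>
 *   #include <net/if.h>
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   struct sockaddr_ll sll;
 *
 *   memset(&sll, 0, sizeof(sll));
 *   sll.sll_family   = AF_PACKET;
 *   sll.sll_protocol = htons(ETH_P_ALL);
 *   sll.sll_ifindex  = if_nametoindex("eth0");
 *   if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
 *           perror("bind");
 */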
3197 
3198 static struct proto packet_proto = {
3199         .name     = "PACKET",
3200         .owner    = THIS_MODULE,
3201         .obj_size = sizeof(struct packet_sock),
3202 };
3203 
3204 /*
3205  *      Create a packet of type SOCK_PACKET.
3206  */
3207 
3208 static int packet_create(struct net *net, struct socket *sock, int protocol,
3209                          int kern)
3210 {
3211         struct sock *sk;
3212         struct packet_sock *po;
3213         __be16 proto = (__force __be16)protocol; /* weird, but documented */
3214         int err;
3215 
3216         if (!ns_capable(net->user_ns, CAP_NET_RAW))
3217                 return -EPERM;
3218         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3219             sock->type != SOCK_PACKET)
3220                 return -ESOCKTNOSUPPORT;
3221 
3222         sock->state = SS_UNCONNECTED;
3223 
3224         err = -ENOBUFS;
3225         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3226         if (sk == NULL)
3227                 goto out;
3228 
3229         sock->ops = &packet_ops;
3230         if (sock->type == SOCK_PACKET)
3231                 sock->ops = &packet_ops_spkt;
3232 
3233         sock_init_data(sock, sk);
3234 
3235         po = pkt_sk(sk);
3236         sk->sk_family = PF_PACKET;
3237         po->num = proto;
3238         po->xmit = dev_queue_xmit;
3239 
3240         err = packet_alloc_pending(po);
3241         if (err)
3242                 goto out2;
3243 
3244         packet_cached_dev_reset(po);
3245 
3246         sk->sk_destruct = packet_sock_destruct;
3247         sk_refcnt_debug_inc(sk);
3248 
3249         /*
3250          *      Attach a protocol block
3251          */
3252 
3253         spin_lock_init(&po->bind_lock);
3254         mutex_init(&po->pg_vec_lock);
3255         po->rollover = NULL;
3256         po->prot_hook.func = packet_rcv;
3257 
3258         if (sock->type == SOCK_PACKET)
3259                 po->prot_hook.func = packet_rcv_spkt;
3260 
3261         po->prot_hook.af_packet_priv = sk;
3262 
3263         if (proto) {
3264                 po->prot_hook.type = proto;
3265                 register_prot_hook(sk);
3266         }
3267 
3268         mutex_lock(&net->packet.sklist_lock);
3269         sk_add_node_rcu(sk, &net->packet.sklist);
3270         mutex_unlock(&net->packet.sklist_lock);
3271 
3272         preempt_disable();
3273         sock_prot_inuse_add(net, &packet_proto, 1);
3274         preempt_enable();
3275 
3276         return 0;
3277 out2:
3278         sk_free(sk);
3279 out:
3280         return err;
3281 }
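
/*
 * Example (userspace, illustrative): the three socket types accepted
 * above. All of them require CAP_NET_RAW in the socket's network
 * namespace.
 *
 *   #include <arpa/inet.h>
 *   #include <linux/if_ether.h>
 *   #include <sys/socket.h>
 *
 *   // Frames delivered/accepted with the link-level header in place:
 *   int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *   // Link-level header removed on RX, built by the kernel on TX:
 *   int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *   // Protocol 0: no hook is registered, so nothing is received
 *   // until a later bind() supplies a protocol:
 *   int idle = socket(AF_PACKET, SOCK_RAW, 0);
 */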
3282 
3283 /*
3284  *      Pull a packet from our receive queue and hand it to the user.
3285  *      If necessary we block.
3286  */
3287 
3288 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3289                           int flags)
3290 {
3291         struct sock *sk = sock->sk;
3292         struct sk_buff *skb;
3293         int copied, err;
3294         int vnet_hdr_len = 0;
3295         unsigned int origlen = 0;
3296 
3297         err = -EINVAL;
3298         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3299                 goto out;
3300 
3301 #if 0
3302         /* What error should we return now? EUNATTACH? */
3303         if (pkt_sk(sk)->ifindex < 0)
3304                 return -ENODEV;
3305 #endif
3306 
3307         if (flags & MSG_ERRQUEUE) {
3308                 err = sock_recv_errqueue(sk, msg, len,
3309                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
3310                 goto out;
3311         }
3312 
3313         /*
3314          *      Call the generic datagram receiver. This handles all sorts
3315          *      of horrible races and re-entrancy so we can forget about it
3316          *      in the protocol layers.
3317          *
3318          *      Now it will return ENETDOWN if the device has just gone
3319          *      down, but then it will block.
3320          */
3321 
3322         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3323 
3324         /*
3325          *      An error occurred, so return it. Because skb_recv_datagram()
3326          *      handles the blocking for us, we don't need to worry about
3327          *      blocking retries.
3328          */
3329 
3330         if (skb == NULL)
3331                 goto out;
3332 
3333         if (pkt_sk(sk)->pressure)
3334                 packet_rcv_has_room(pkt_sk(sk), NULL);
3335 
3336         if (pkt_sk(sk)->has_vnet_hdr) {
3337                 err = packet_rcv_vnet(msg, skb, &len);
3338                 if (err)
3339                         goto out_free;
3340                 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3341         }
3342 
3343         /* You lose any data beyond the buffer you gave. If this worries
3344          * a user program, it can ask the device for its MTU
3345          * anyway.
3346          */
3347         copied = skb->len;
3348         if (copied > len) {
3349                 copied = len;
3350                 msg->msg_flags |= MSG_TRUNC;
3351         }
3352 
3353         err = skb_copy_datagram_msg(skb, 0, msg, copied);
3354         if (err)
3355                 goto out_free;
3356 
3357         if (sock->type != SOCK_PACKET) {
3358                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3359 
3360                 /* Original length was stored in sockaddr_ll fields */
3361                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3362                 sll->sll_family = AF_PACKET;
3363                 sll->sll_protocol = skb->protocol;
3364         }
3365 
3366         sock_recv_ts_and_drops(msg, sk, skb);
3367 
3368         if (msg->msg_name) {
3369                 /* If the address length field is there to be filled
3370                  * in, we fill it in now.
3371                  */
3372                 if (sock->type == SOCK_PACKET) {
3373                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3374                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3375                 } else {
3376                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3377 
3378                         msg->msg_namelen = sll->sll_halen +
3379                                 offsetof(struct sockaddr_ll, sll_addr);
3380                 }
3381                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3382                        msg->msg_namelen);
3383         }
3384 
3385         if (pkt_sk(sk)->auxdata) {
3386                 struct tpacket_auxdata aux;
3387 
3388                 aux.tp_status = TP_STATUS_USER;
3389                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3390                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3391                 else if (skb->pkt_type != PACKET_OUTGOING &&
3392                          (skb->ip_summed == CHECKSUM_COMPLETE ||
3393                           skb_csum_unnecessary(skb)))
3394                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3395 
3396                 aux.tp_len = origlen;
3397                 aux.tp_snaplen = skb->len;
3398                 aux.tp_mac = 0;
3399                 aux.tp_net = skb_network_offset(skb);
3400                 if (skb_vlan_tag_present(skb)) {
3401                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3402                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3403                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3404                 } else {
3405                         aux.tp_vlan_tci = 0;
3406                         aux.tp_vlan_tpid = 0;
3407                 }
3408                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3409         }
3410 
3411         /*
3412          *      Free or return the buffer as appropriate. Again this
3413          *      hides all the races and re-entrancy issues from us.
3414          */
3415         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3416 
3417 out_free:
3418         skb_free_datagram(sk, skb);
3419 out:
3420         return err;
3421 }
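
/*
 * Example (userspace, illustrative): receiving one frame with the
 * PACKET_AUXDATA control message that the code above fills in. This
 * assumes the option was enabled beforehand with
 * setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, ...).
 *
 *   #include <linux/if_packet.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   static int read_one(int fd)
 *   {
 *           char buf[2048];
 *           char ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *           struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *           struct msghdr msg = {
 *                   .msg_iov = &iov, .msg_iovlen = 1,
 *                   .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *           };
 *           struct cmsghdr *cmsg;
 *
 *           if (recvmsg(fd, &msg, 0) < 0)
 *                   return -1;
 *           for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *                cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *                   if (cmsg->cmsg_level == SOL_PACKET &&
 *                       cmsg->cmsg_type == PACKET_AUXDATA) {
 *                           struct tpacket_auxdata aux;
 *
 *                           memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
 *                           // aux.tp_len is the original wire length,
 *                           // even if the copy into buf was truncated
 *                   }
 *           }
 *           return 0;
 *   }
 */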
3422 
3423 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3424                                int *uaddr_len, int peer)
3425 {
3426         struct net_device *dev;
3427         struct sock *sk = sock->sk;
3428 
3429         if (peer)
3430                 return -EOPNOTSUPP;
3431 
3432         uaddr->sa_family = AF_PACKET;
3433         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3434         rcu_read_lock();
3435         dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3436         if (dev)
3437                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3438         rcu_read_unlock();
3439         *uaddr_len = sizeof(*uaddr);
3440 
3441         return 0;
3442 }
3443 
3444 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3445                           int *uaddr_len, int peer)
3446 {
3447         struct net_device *dev;
3448         struct sock *sk = sock->sk;
3449         struct packet_sock *po = pkt_sk(sk);
3450         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3451 
3452         if (peer)
3453                 return -EOPNOTSUPP;
3454 
3455         sll->sll_family = AF_PACKET;
3456         sll->sll_ifindex = po->ifindex;
3457         sll->sll_protocol = po->num;
3458         sll->sll_pkttype = 0;
3459         rcu_read_lock();
3460         dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3461         if (dev) {
3462                 sll->sll_hatype = dev->type;
3463                 sll->sll_halen = dev->addr_len;
3464                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3465         } else {
3466                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3467                 sll->sll_halen = 0;
3468         }
3469         rcu_read_unlock();
3470         *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3471 
3472         return 0;
3473 }
3474 
3475 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3476                          int what)
3477 {
3478         switch (i->type) {
3479         case PACKET_MR_MULTICAST:
3480                 if (i->alen != dev->addr_len)
3481                         return -EINVAL;
3482                 if (what > 0)
3483                         return dev_mc_add(dev, i->addr);
3484                 else
3485                         return dev_mc_del(dev, i->addr);
3486                 break;
3487         case PACKET_MR_PROMISC:
3488                 return dev_set_promiscuity(dev, what);
3489         case PACKET_MR_ALLMULTI:
3490                 return dev_set_allmulti(dev, what);
3491         case PACKET_MR_UNICAST:
3492                 if (i->alen != dev->addr_len)
3493                         return -EINVAL;
3494                 if (what > 0)
3495                         return dev_uc_add(dev, i->addr);
3496                 else
3497                         return dev_uc_del(dev, i->addr);
3498                 break;
3499         default:
3500                 break;
3501         }
3502         return 0;
3503 }
3504 
3505 static void packet_dev_mclist_delete(struct net_device *dev,
3506                                      struct packet_mclist **mlp)
3507 {
3508         struct packet_mclist *ml;
3509 
3510         while ((ml = *mlp) != NULL) {
3511                 if (ml->ifindex == dev->ifindex) {
3512                         packet_dev_mc(dev, ml, -1);
3513                         *mlp = ml->next;
3514                         kfree(ml);
3515                 } else
3516                         mlp = &ml->next;
3517         }
3518 }
3519 
3520 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3521 {
3522         struct packet_sock *po = pkt_sk(sk);
3523         struct packet_mclist *ml, *i;
3524         struct net_device *dev;
3525         int err;
3526 
3527         rtnl_lock();
3528 
3529         err = -ENODEV;
3530         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3531         if (!dev)
3532                 goto done;
3533 
3534         err = -EINVAL;
3535         if (mreq->mr_alen > dev->addr_len)
3536                 goto done;
3537 
3538         err = -ENOBUFS;
3539         i = kmalloc(sizeof(*i), GFP_KERNEL);
3540         if (i == NULL)
3541                 goto done;
3542 
3543         err = 0;
3544         for (ml = po->mclist; ml; ml = ml->next) {
3545                 if (ml->ifindex == mreq->mr_ifindex &&
3546                     ml->type == mreq->mr_type &&
3547                     ml->alen == mreq->mr_alen &&
3548                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3549                         ml->count++;
3550                         /* Free the new element ... */
3551                         kfree(i);
3552                         goto done;
3553                 }
3554         }
3555 
3556         i->type = mreq->mr_type;
3557         i->ifindex = mreq->mr_ifindex;
3558         i->alen = mreq->mr_alen;
3559         memcpy(i->addr, mreq->mr_address, i->alen);
3560         memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3561         i->count = 1;
3562         i->next = po->mclist;
3563         po->mclist = i;
3564         err = packet_dev_mc(dev, i, 1);
3565         if (err) {
3566                 po->mclist = i->next;
3567                 kfree(i);
3568         }
3569 
3570 done:
3571         rtnl_unlock();
3572         return err;
3573 }
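
/*
 * Example (userspace, illustrative): the most common use of the
 * membership API above is switching an interface into promiscuous mode
 * via the refcounted PACKET_MR_PROMISC type, so the mode is dropped
 * again when the socket closes (see packet_flush_mclist() below).
 * "eth0" is an assumed interface name.
 *
 *   #include <linux/if_packet.h>
 *   #include <net/if.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   struct packet_mreq mr;
 *
 *   memset(&mr, 0, sizeof(mr));
 *   mr.mr_ifindex = if_nametoindex("eth0");
 *   mr.mr_type    = PACKET_MR_PROMISC;
 *   setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */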
3574 
3575 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3576 {
3577         struct packet_mclist *ml, **mlp;
3578 
3579         rtnl_lock();
3580 
3581         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3582                 if (ml->ifindex == mreq->mr_ifindex &&
3583                     ml->type == mreq->mr_type &&
3584                     ml->alen == mreq->mr_alen &&
3585                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3586                         if (--ml->count == 0) {
3587                                 struct net_device *dev;
3588                                 *mlp = ml->next;
3589                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3590                                 if (dev)
3591                                         packet_dev_mc(dev, ml, -1);
3592                                 kfree(ml);
3593                         }
3594                         break;
3595                 }
3596         }
3597         rtnl_unlock();
3598         return 0;
3599 }
3600 
3601 static void packet_flush_mclist(struct sock *sk)
3602 {
3603         struct packet_sock *po = pkt_sk(sk);
3604         struct packet_mclist *ml;
3605 
3606         if (!po->mclist)
3607                 return;
3608 
3609         rtnl_lock();
3610         while ((ml = po->mclist) != NULL) {
3611                 struct net_device *dev;
3612 
3613                 po->mclist = ml->next;
3614                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3615                 if (dev != NULL)
3616                         packet_dev_mc(dev, ml, -1);
3617                 kfree(ml);
3618         }
3619         rtnl_unlock();
3620 }
3621 
3622 static int
3623 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3624 {
3625         struct sock *sk = sock->sk;
3626         struct packet_sock *po = pkt_sk(sk);
3627         int ret;
3628 
3629         if (level != SOL_PACKET)
3630                 return -ENOPROTOOPT;
3631 
3632         switch (optname) {
3633         case PACKET_ADD_MEMBERSHIP:
3634         case PACKET_DROP_MEMBERSHIP:
3635         {
3636                 struct packet_mreq_max mreq;
3637                 int len = optlen;
3638                 memset(&mreq, 0, sizeof(mreq));
3639                 if (len < sizeof(struct packet_mreq))
3640                         return -EINVAL;
3641                 if (len > sizeof(mreq))
3642                         len = sizeof(mreq);
3643                 if (copy_from_user(&mreq, optval, len))
3644                         return -EFAULT;
3645                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3646                         return -EINVAL;
3647                 if (optname == PACKET_ADD_MEMBERSHIP)
3648                         ret = packet_mc_add(sk, &mreq);
3649                 else
3650                         ret = packet_mc_drop(sk, &mreq);
3651                 return ret;
3652         }
3653 
3654         case PACKET_RX_RING:
3655         case PACKET_TX_RING:
3656         {
3657                 union tpacket_req_u req_u;
3658                 int len;
3659 
3660                 switch (po->tp_version) {
3661                 case TPACKET_V1:
3662                 case TPACKET_V2:
3663                         len = sizeof(req_u.req);
3664                         break;
3665                 case TPACKET_V3:
3666                 default:
3667                         len = sizeof(req_u.req3);
3668                         break;
3669                 }
3670                 if (optlen < len)
3671                         return -EINVAL;
3672                 if (copy_from_user(&req_u.req, optval, len))
3673                         return -EFAULT;
3674                 return packet_set_ring(sk, &req_u, 0,
3675                         optname == PACKET_TX_RING);
3676         }
3677         case PACKET_COPY_THRESH:
3678         {
3679                 int val;
3680 
3681                 if (optlen != sizeof(val))
3682                         return -EINVAL;
3683                 if (copy_from_user(&val, optval, sizeof(val)))
3684                         return -EFAULT;
3685 
3686                 pkt_sk(sk)->copy_thresh = val;
3687                 return 0;
3688         }
3689         case PACKET_VERSION:
3690         {
3691                 int val;
3692 
3693                 if (optlen != sizeof(val))
3694                         return -EINVAL;
3695                 if (copy_from_user(&val, optval, sizeof(val)))
3696                         return -EFAULT;
3697                 switch (val) {
3698                 case TPACKET_V1:
3699                 case TPACKET_V2:
3700                 case TPACKET_V3:
3701                         break;
3702                 default:
3703                         return -EINVAL;
3704                 }
3705                 lock_sock(sk);
3706                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3707                         ret = -EBUSY;
3708                 } else {
3709                         po->tp_version = val;
3710                         ret = 0;
3711                 }
3712                 release_sock(sk);
3713                 return ret;
3714         }
3715         case PACKET_RESERVE:
3716         {
3717                 unsigned int val;
3718 
3719                 if (optlen != sizeof(val))
3720                         return -EINVAL;
3721                 if (copy_from_user(&val, optval, sizeof(val)))
3722                         return -EFAULT;
3723                 if (val > INT_MAX)
3724                         return -EINVAL;
3725                 lock_sock(sk);
3726                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3727                         ret = -EBUSY;
3728                 } else {
3729                         po->tp_reserve = val;
3730                         ret = 0;
3731                 }
3732                 release_sock(sk);
3733                 return ret;
3734         }
3735         case PACKET_LOSS:
3736         {
3737                 unsigned int val;
3738 
3739                 if (optlen != sizeof(val))
3740                         return -EINVAL;
3741                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3742                         return -EBUSY;
3743                 if (copy_from_user(&val, optval, sizeof(val)))
3744                         return -EFAULT;
3745                 po->tp_loss = !!val;
3746                 return 0;
3747         }
3748         case PACKET_AUXDATA:
3749         {
3750                 int val;
3751 
3752                 if (optlen < sizeof(val))
3753                         return -EINVAL;
3754                 if (copy_from_user(&val, optval, sizeof(val)))
3755                         return -EFAULT;
3756 
3757                 po->auxdata = !!val;
3758                 return 0;
3759         }
3760         case PACKET_ORIGDEV:
3761         {
3762                 int val;
3763 
3764                 if (optlen < sizeof(val))
3765                         return -EINVAL;
3766                 if (copy_from_user(&val, optval, sizeof(val)))
3767                         return -EFAULT;
3768 
3769                 po->origdev = !!val;
3770                 return 0;
3771         }
3772         case PACKET_VNET_HDR:
3773         {
3774                 int val;
3775 
3776                 if (sock->type != SOCK_RAW)
3777                         return -EINVAL;
3778                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3779                         return -EBUSY;
3780                 if (optlen < sizeof(val))
3781                         return -EINVAL;
3782                 if (copy_from_user(&val, optval, sizeof(val)))
3783                         return -EFAULT;
3784 
3785                 po->has_vnet_hdr = !!val;
3786                 return 0;
3787         }
3788         case PACKET_TIMESTAMP:
3789         {
3790                 int val;
3791 
3792                 if (optlen != sizeof(val))
3793                         return -EINVAL;
3794                 if (copy_from_user(&val, optval, sizeof(val)))
3795                         return -EFAULT;
3796 
3797                 po->tp_tstamp = val;
3798                 return 0;
3799         }
3800         case PACKET_FANOUT:
3801         {
3802                 int val;
3803 
3804                 if (optlen != sizeof(val))
3805                         return -EINVAL;
3806                 if (copy_from_user(&val, optval, sizeof(val)))
3807                         return -EFAULT;
3808 
3809                 return fanout_add(sk, val & 0xffff, val >> 16);
3810         }
3811         case PACKET_FANOUT_DATA:
3812         {
3813                 if (!po->fanout)
3814                         return -EINVAL;
3815 
3816                 return fanout_set_data(po, optval, optlen);
3817         }
3818         case PACKET_TX_HAS_OFF:
3819         {
3820                 unsigned int val;
3821 
3822                 if (optlen != sizeof(val))
3823                         return -EINVAL;
3824                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3825                         return -EBUSY;
3826                 if (copy_from_user(&val, optval, sizeof(val)))
3827                         return -EFAULT;
3828                 po->tp_tx_has_off = !!val;
3829                 return 0;
3830         }
3831         case PACKET_QDISC_BYPASS:
3832         {
3833                 int val;
3834 
3835                 if (optlen != sizeof(val))
3836                         return -EINVAL;
3837                 if (copy_from_user(&val, optval, sizeof(val)))
3838                         return -EFAULT;
3839 
3840                 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3841                 return 0;
3842         }
3843         default:
3844                 return -ENOPROTOOPT;
3845         }
3846 }
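
/*
 * Example (userspace, illustrative): the usual option sequence for a
 * TPACKET_V3 receive ring, matching the PACKET_VERSION and
 * PACKET_RX_RING cases above. The geometry (64 blocks of 4 MiB) is an
 * arbitrary assumption, not a recommendation. PACKET_VERSION must be
 * set before the ring is created; afterwards it returns -EBUSY.
 *
 *   #include <linux/if_packet.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   int ver = TPACKET_V3;
 *   struct tpacket_req3 req;
 *
 *   setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *
 *   memset(&req, 0, sizeof(req));
 *   req.tp_block_size = 1 << 22;          // must be page-aligned
 *   req.tp_block_nr   = 64;
 *   req.tp_frame_size = 1 << 11;
 *   req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size)
 *                       * req.tp_block_nr; // checked in packet_set_ring()
 *   req.tp_retire_blk_tov = 60;           // block retire timeout, ms
 *   setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *   // PACKET_FANOUT packs the group id into the low 16 bits and the
 *   // mode into the high bits, as fanout_add() is called above:
 *   int fanout = 42 | (PACKET_FANOUT_HASH << 16);
 *   setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &fanout, sizeof(fanout));
 */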
3847 
3848 static int packet_getsockopt(struct socket *sock, int level, int optname,
3849                              char __user *optval, int __user *optlen)
3850 {
3851         int len;
3852         int val, lv = sizeof(val);
3853         struct sock *sk = sock->sk;
3854         struct packet_sock *po = pkt_sk(sk);
3855         void *data = &val;
3856         union tpacket_stats_u st;
3857         struct tpacket_rollover_stats rstats;
3858         struct packet_rollover *rollover;
3859 
3860         if (level != SOL_PACKET)
3861                 return -ENOPROTOOPT;
3862 
3863         if (get_user(len, optlen))
3864                 return -EFAULT;
3865 
3866         if (len < 0)
3867                 return -EINVAL;
3868 
3869         switch (optname) {
3870         case PACKET_STATISTICS:
3871                 spin_lock_bh(&sk->sk_receive_queue.lock);
3872                 memcpy(&st, &po->stats, sizeof(st));
3873                 memset(&po->stats, 0, sizeof(po->stats));
3874                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3875 
3876                 if (po->tp_version == TPACKET_V3) {
3877                         lv = sizeof(struct tpacket_stats_v3);
3878                         st.stats3.tp_packets += st.stats3.tp_drops;
3879                         data = &st.stats3;
3880                 } else {
3881                         lv = sizeof(struct tpacket_stats);
3882                         st.stats1.tp_packets += st.stats1.tp_drops;
3883                         data = &st.stats1;
3884                 }
3885 
3886                 break;
3887         case PACKET_AUXDATA:
3888                 val = po->auxdata;
3889                 break;
3890         case PACKET_ORIGDEV:
3891                 val = po->origdev;
3892                 break;
3893         case PACKET_VNET_HDR:
3894                 val = po->has_vnet_hdr;
3895                 break;
3896         case PACKET_VERSION:
3897                 val = po->tp_version;
3898                 break;
3899         case PACKET_HDRLEN:
3900                 if (len > sizeof(int))
3901                         len = sizeof(int);
3902                 if (len < sizeof(int))
3903                         return -EINVAL;
3904                 if (copy_from_user(&val, optval, len))
3905                         return -EFAULT;
3906                 switch (val) {
3907                 case TPACKET_V1:
3908                         val = sizeof(struct tpacket_hdr);
3909                         break;
3910                 case TPACKET_V2:
3911                         val = sizeof(struct tpacket2_hdr);
3912                         break;
3913                 case TPACKET_V3:
3914                         val = sizeof(struct tpacket3_hdr);
3915                         break;
3916                 default:
3917                         return -EINVAL;
3918                 }
3919                 break;
3920         case PACKET_RESERVE:
3921                 val = po->tp_reserve;
3922                 break;
3923         case PACKET_LOSS:
3924                 val = po->tp_loss;
3925                 break;
3926         case PACKET_TIMESTAMP:
3927                 val = po->tp_tstamp;
3928                 break;
3929         case PACKET_FANOUT:
3930                 val = (po->fanout ?
3931                        ((u32)po->fanout->id |
3932                         ((u32)po->fanout->type << 16) |
3933                         ((u32)po->fanout->flags << 24)) :
3934                        0);
3935                 break;
3936         case PACKET_ROLLOVER_STATS:
3937                 rcu_read_lock();
3938                 rollover = rcu_dereference(po->rollover);
3939                 if (rollover) {
3940                         rstats.tp_all = atomic_long_read(&rollover->num);
3941                         rstats.tp_huge = atomic_long_read(&rollover->num_huge);
3942                         rstats.tp_failed = atomic_long_read(&rollover->num_failed);
3943                         data = &rstats;
3944                         lv = sizeof(rstats);
3945                 }
3946                 rcu_read_unlock();
3947                 if (!rollover)
3948                         return -EINVAL;
3949                 break;
3950         case PACKET_TX_HAS_OFF:
3951                 val = po->tp_tx_has_off;
3952                 break;
3953         case PACKET_QDISC_BYPASS:
3954                 val = packet_use_direct_xmit(po);
3955                 break;
3956         default:
3957                 return -ENOPROTOOPT;
3958         }
3959 
3960         if (len > lv)
3961                 len = lv;
3962         if (put_user(len, optlen))
3963                 return -EFAULT;
3964         if (copy_to_user(optval, data, len))
3965                 return -EFAULT;
3966         return 0;
3967 }
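
/*
 * Example (userspace, illustrative): reading the drop counters handled
 * by the PACKET_STATISTICS case above. Note the read-and-reset
 * semantics: the kernel zeroes its counters on every call.
 *
 *   #include <linux/if_packet.h>
 *   #include <sys/socket.h>
 *
 *   struct tpacket_stats st;
 *   socklen_t len = sizeof(st);
 *
 *   getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *   // st.tp_packets includes st.tp_drops (summed above); this is the
 *   // TPACKET_V1/V2 layout, tpacket_stats_v3 applies under TPACKET_V3.
 */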
3968 
3969 
3970 #ifdef CONFIG_COMPAT
3971 static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3972                                     char __user *optval, unsigned int optlen)
3973 {
3974         struct packet_sock *po = pkt_sk(sock->sk);
3975 
3976         if (level != SOL_PACKET)
3977                 return -ENOPROTOOPT;
3978 
3979         if (optname == PACKET_FANOUT_DATA &&
3980             po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
3981                 optval = (char __user *)get_compat_bpf_fprog(optval);
3982                 if (!optval)
3983                         return -EFAULT;
3984                 optlen = sizeof(struct sock_fprog);
3985         }
3986 
3987         return packet_setsockopt(sock, level, optname, optval, optlen);
3988 }
3989 #endif
3990 
3991 static int packet_notifier(struct notifier_block *this,
3992                            unsigned long msg, void *ptr)
3993 {
3994         struct sock *sk;
3995         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3996         struct net *net = dev_net(dev);
3997 
3998         rcu_read_lock();
3999         sk_for_each_rcu(sk, &net->packet.sklist) {
4000                 struct packet_sock *po = pkt_sk(sk);
4001 
4002                 switch (msg) {
4003                 case NETDEV_UNREGISTER:
4004                         if (po->mclist)
4005                                 packet_dev_mclist_delete(dev, &po->mclist);
4006                         /* fallthrough */
4007 
4008                 case NETDEV_DOWN:
4009                         if (dev->ifindex == po->ifindex) {
4010                                 spin_lock(&po->bind_lock);
4011                                 if (po->running) {
4012                                         __unregister_prot_hook(sk, false);
4013                                         sk->sk_err = ENETDOWN;
4014                                         if (!sock_flag(sk, SOCK_DEAD))
4015                                                 sk->sk_error_report(sk);
4016                                 }
4017                                 if (msg == NETDEV_UNREGISTER) {
4018                                         packet_cached_dev_reset(po);
4019                                         po->ifindex = -1;
4020                                         if (po->prot_hook.dev)
4021                                                 dev_put(po->prot_hook.dev);
4022                                         po->prot_hook.dev = NULL;
4023                                 }
4024                                 spin_unlock(&po->bind_lock);
4025                         }
4026                         break;
4027                 case NETDEV_UP:
4028                         if (dev->ifindex == po->ifindex) {
4029                                 spin_lock(&po->bind_lock);
4030                                 if (po->num)
4031                                         register_prot_hook(sk);
4032                                 spin_unlock(&po->bind_lock);
4033                         }
4034                         break;
4035                 }
4036         }
4037         rcu_read_unlock();
4038         return NOTIFY_DONE;
4039 }
4040 
4041 
4042 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4043                         unsigned long arg)
4044 {
4045         struct sock *sk = sock->sk;
4046 
4047         switch (cmd) {
4048         case SIOCOUTQ:
4049         {
4050                 int amount = sk_wmem_alloc_get(sk);
4051 
4052                 return put_user(amount, (int __user *)arg);
4053         }
4054         case SIOCINQ:
4055         {
4056                 struct sk_buff *skb;
4057                 int amount = 0;
4058 
4059                 spin_lock_bh(&sk->sk_receive_queue.lock);
4060                 skb = skb_peek(&sk->sk_receive_queue);
4061                 if (skb)
4062                         amount = skb->len;
4063                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4064                 return put_user(amount, (int __user *)arg);
4065         }
4066         case SIOCGSTAMP:
4067                 return sock_get_timestamp(sk, (struct timeval __user *)arg);
4068         case SIOCGSTAMPNS:
4069                 return sock_get_timestampns(sk, (struct timespec __user *)arg);
4070 
4071 #ifdef CONFIG_INET
4072         case SIOCADDRT:
4073         case SIOCDELRT:
4074         case SIOCDARP:
4075         case SIOCGARP:
4076         case SIOCSARP:
4077         case SIOCGIFADDR:
4078         case SIOCSIFADDR:
4079         case SIOCGIFBRDADDR:
4080         case SIOCSIFBRDADDR:
4081         case SIOCGIFNETMASK:
4082         case SIOCSIFNETMASK:
4083         case SIOCGIFDSTADDR:
4084         case SIOCSIFDSTADDR:
4085         case SIOCSIFFLAGS:
4086                 return inet_dgram_ops.ioctl(sock, cmd, arg);
4087 #endif
4088 
4089         default:
4090                 return -ENOIOCTLCMD;
4091         }
4092         return 0;
4093 }
4094 
4095 static unsigned int packet_poll(struct file *file, struct socket *sock,
4096                                 poll_table *wait)
4097 {
4098         struct sock *sk = sock->sk;
4099         struct packet_sock *po = pkt_sk(sk);
4100         unsigned int mask = datagram_poll(file, sock, wait);
4101 
4102         spin_lock_bh(&sk->sk_receive_queue.lock);
4103         if (po->rx_ring.pg_vec) {
4104                 if (!packet_previous_rx_frame(po, &po->rx_ring,
4105                         TP_STATUS_KERNEL))
4106                         mask |= POLLIN | POLLRDNORM;
4107         }
4108         if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4109                 po->pressure = 0;
4110         spin_unlock_bh(&sk->sk_receive_queue.lock);
4111         spin_lock_bh(&sk->sk_write_queue.lock);
4112         if (po->tx_ring.pg_vec) {
4113                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4114                         mask |= POLLOUT | POLLWRNORM;
4115         }
4116         spin_unlock_bh(&sk->sk_write_queue.lock);
4117         return mask;
4118 }
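
/*
 * Example (userspace, illustrative): a sketch of a poll() loop against
 * a mapped TPACKET_V2 receive ring, relying on packet_poll() above to
 * report POLLIN once the frame at the ring head has left
 * TP_STATUS_KERNEL. "ring" is assumed to point at the mmap()ed area,
 * "frame_nr" and "frame_size" at the tpacket_req geometry used to
 * create it.
 *
 *   #include <linux/if_packet.h>
 *   #include <poll.h>
 *
 *   unsigned int idx = 0;
 *
 *   for (;;) {
 *           struct tpacket2_hdr *hdr =
 *                   (void *)(ring + idx * frame_size);
 *           struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *           if (!(hdr->tp_status & TP_STATUS_USER)) {
 *                   poll(&pfd, 1, -1);
 *                   continue;
 *           }
 *           // frame data starts at (char *)hdr + hdr->tp_mac
 *           hdr->tp_status = TP_STATUS_KERNEL;  // hand back to kernel
 *           idx = (idx + 1) % frame_nr;
 *   }
 */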
4119 
4120 
4121 /* Dirty? Well, I still have not found a better way to account
4122  * for user mmaps.
4123  */
4124 
4125 static void packet_mm_open(struct vm_area_struct *vma)
4126 {
4127         struct file *file = vma->vm_file;
4128         struct socket *sock = file->private_data;
4129         struct sock *sk = sock->sk;
4130 
4131         if (sk)
4132                 atomic_inc(&pkt_sk(sk)->mapped);
4133 }
4134 
4135 static void packet_mm_close(struct vm_area_struct *vma)
4136 {
4137         struct file *file = vma->vm_file;
4138         struct socket *sock = file->private_data;
4139         struct sock *sk = sock->sk;
4140 
4141         if (sk)
4142                 atomic_dec(&pkt_sk(sk)->mapped);
4143 }
4144 
4145 static const struct vm_operations_struct packet_mmap_ops = {
4146         .open   =       packet_mm_open,
4147         .close  =       packet_mm_close,
4148 };
4149 
4150 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4151                         unsigned int len)
4152 {
4153         int i;
4154 
4155         for (i = 0; i < len; i++) {
4156                 if (likely(pg_vec[i].buffer)) {
4157                         if (is_vmalloc_addr(pg_vec[i].buffer))
4158                                 vfree(pg_vec[i].buffer);
4159                         else
4160                                 free_pages((unsigned long)pg_vec[i].buffer,
4161                                            order);
4162                         pg_vec[i].buffer = NULL;
4163                 }
4164         }
4165         kfree(pg_vec);
4166 }
4167 
4168 static char *alloc_one_pg_vec_page(unsigned long order)
4169 {
4170         char *buffer;
4171         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4172                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4173 
4174         buffer = (char *) __get_free_pages(gfp_flags, order);
4175         if (buffer)
4176                 return buffer;
4177 
4178         /* __get_free_pages failed, fall back to vmalloc */
4179         buffer = vzalloc((1 << order) * PAGE_SIZE);
4180         if (buffer)
4181                 return buffer;
4182 
4183         /* vmalloc failed, let's dig into swap here */
4184         gfp_flags &= ~__GFP_NORETRY;
4185         buffer = (char *) __get_free_pages(gfp_flags, order);
4186         if (buffer)
4187                 return buffer;
4188 
4189         /* complete and utter failure */
4190         return NULL;
4191 }
4192 
4193 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4194 {
4195         unsigned int block_nr = req->tp_block_nr;
4196         struct pgv *pg_vec;
4197         int i;
4198 
4199         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4200         if (unlikely(!pg_vec))
4201                 goto out;
4202 
4203         for (i = 0; i < block_nr; i++) {
4204                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4205                 if (unlikely(!pg_vec[i].buffer))
4206                         goto out_free_pgvec;
4207         }
4208 
4209 out:
4210         return pg_vec;
4211 
4212 out_free_pgvec:
4213         free_pg_vec(pg_vec, order, block_nr);
4214         pg_vec = NULL;
4215         goto out;
4216 }
4217 
4218 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4219                 int closing, int tx_ring)
4220 {
4221         struct pgv *pg_vec = NULL;
4222         struct packet_sock *po = pkt_sk(sk);
4223         int was_running, order = 0;
4224         struct packet_ring_buffer *rb;
4225         struct sk_buff_head *rb_queue;
4226         __be16 num;
4227         int err = -EINVAL;
4228         /* Alias added to minimize code churn */
4229         struct tpacket_req *req = &req_u->req;
4230 
4231         lock_sock(sk);
4232 
4233         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4234         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4235 
4236         err = -EBUSY;
4237         if (!closing) {
4238                 if (atomic_read(&po->mapped))
4239                         goto out;
4240                 if (packet_read_pending(rb))
4241                         goto out;
4242         }
4243 
4244         if (req->tp_block_nr) {
4245                 /* Sanity tests and some calculations */
4246                 err = -EBUSY;
4247                 if (unlikely(rb->pg_vec))
4248                         goto out;
4249 
4250                 switch (po->tp_version) {
4251                 case TPACKET_V1:
4252                         po->tp_hdrlen = TPACKET_HDRLEN;
4253                         break;
4254                 case TPACKET_V2:
4255                         po->tp_hdrlen = TPACKET2_HDRLEN;
4256                         break;
4257                 case TPACKET_V3:
4258                         po->tp_hdrlen = TPACKET3_HDRLEN;
4259                         break;
4260                 }
4261 
4262                 err = -EINVAL;
4263                 if (unlikely((int)req->tp_block_size <= 0))
4264                         goto out;
4265                 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4266                         goto out;
4267                 if (po->tp_version >= TPACKET_V3 &&
4268                     req->tp_block_size <=
4269                           BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4270                         goto out;
4271                 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4272                                         po->tp_reserve))
4273                         goto out;
4274                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4275                         goto out;
4276 
4277                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4278                 if (unlikely(rb->frames_per_block == 0))
4279                         goto out;
4280                 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
4281                         goto out;
4282                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4283                                         req->tp_frame_nr))
4284                         goto out;
4285 
4286                 err = -ENOMEM;
4287                 order = get_order(req->tp_block_size);
4288                 pg_vec = alloc_pg_vec(req, order);
4289                 if (unlikely(!pg_vec))
4290                         goto out;
4291                 switch (po->tp_version) {
4292                 case TPACKET_V3:
4293                         /* Block transmit is not supported yet */
4294                         if (!tx_ring) {
4295                                 init_prb_bdqc(po, rb, pg_vec, req_u);
4296                         } else {
4297                                 struct tpacket_req3 *req3 = &req_u->req3;
4298 
4299                                 if (req3->tp_retire_blk_tov ||
4300                                     req3->tp_sizeof_priv ||
4301                                     req3->tp_feature_req_word) {
4302                                         err = -EINVAL;
4303                                         goto out;
4304                                 }
4305                         }
4306                         break;
4307                 default:
4308                         break;
4309                 }
4310         }
4311         /* Done */
4312         else {
4313                 err = -EINVAL;
4314                 if (unlikely(req->tp_frame_nr))
4315                         goto out;
4316         }
4317 
4318 
4319         /* Detach socket from network */
4320         spin_lock(&po->bind_lock);
4321         was_running = po->running;
4322         num = po->num;
4323         if (was_running) {
4324                 po->num = 0;
4325                 __unregister_prot_hook(sk, false);
4326         }
4327         spin_unlock(&po->bind_lock);
4328 
4329         synchronize_net();
4330 
4331         err = -EBUSY;
4332         mutex_lock(&po->pg_vec_lock);
4333         if (closing || atomic_read(&po->mapped) == 0) {
4334                 err = 0;
4335                 spin_lock_bh(&rb_queue->lock);
4336                 swap(rb->pg_vec, pg_vec);
4337                 rb->frame_max = (req->tp_frame_nr - 1);
4338                 rb->head = 0;
4339                 rb->frame_size = req->tp_frame_size;
4340                 spin_unlock_bh(&rb_queue->lock);
4341 
4342                 swap(rb->pg_vec_order, order);
4343                 swap(rb->pg_vec_len, req->tp_block_nr);
4344 
4345                 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4346                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4347                                                 tpacket_rcv : packet_rcv;
4348                 skb_queue_purge(rb_queue);
4349                 if (atomic_read(&po->mapped))
4350                         pr_err("packet_mmap: vma is busy: %d\n",
4351                                atomic_read(&po->mapped));
4352         }
4353         mutex_unlock(&po->pg_vec_lock);
4354 
4355         spin_lock(&po->bind_lock);
4356         if (was_running) {
4357                 po->num = num;
4358                 register_prot_hook(sk);
4359         }
4360         spin_unlock(&po->bind_lock);
4361         if (pg_vec && (po->tp_version > TPACKET_V2)) {
4362                 /* Because we don't support block-based V3 on tx-ring */
4363                 if (!tx_ring)
4364                         prb_shutdown_retire_blk_timer(po, rb_queue);
4365         }
4366 
4367         if (pg_vec)
4368                 free_pg_vec(pg_vec, order, req->tp_block_nr);
4369 out:
4370         release_sock(sk);
4371         return err;
4372 }
4373 
4374 static int packet_mmap(struct file *file, struct socket *sock,
4375                 struct vm_area_struct *vma)
4376 {
4377         struct sock *sk = sock->sk;
4378         struct packet_sock *po = pkt_sk(sk);
4379         unsigned long size, expected_size;
4380         struct packet_ring_buffer *rb;
4381         unsigned long start;
4382         int err = -EINVAL;
4383         int i;
4384 
4385         if (vma->vm_pgoff)
4386                 return -EINVAL;
4387 
4388         mutex_lock(&po->pg_vec_lock);
4389 
4390         expected_size = 0;
4391         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4392                 if (rb->pg_vec) {
4393                         expected_size += rb->pg_vec_len
4394                                                 * rb->pg_vec_pages
4395                                                 * PAGE_SIZE;
4396                 }
4397         }
4398 
4399         if (expected_size == 0)
4400                 goto out;
4401 
4402         size = vma->vm_end - vma->vm_start;
4403         if (size != expected_size)
4404                 goto out;
4405 
4406         start = vma->vm_start;
4407         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4408                 if (rb->pg_vec == NULL)
4409                         continue;
4410 
4411                 for (i = 0; i < rb->pg_vec_len; i++) {
4412                         struct page *page;
4413                         void *kaddr = rb->pg_vec[i].buffer;
4414                         int pg_num;
4415 
4416                         for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4417                                 page = pgv_to_page(kaddr);
4418                                 err = vm_insert_page(vma, start, page);
4419                                 if (unlikely(err))
4420                                         goto out;
4421                                 start += PAGE_SIZE;
4422                                 kaddr += PAGE_SIZE;
4423                         }
4424                 }
4425         }
4426 
4427         atomic_inc(&po->mapped);
4428         vma->vm_ops = &packet_mmap_ops;
4429         err = 0;
4430 
4431 out:
4432         mutex_unlock(&po->pg_vec_lock);
4433         return err;
4434 }
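
/*
 * Example (userspace, illustrative): mapping the ring(s) created with
 * PACKET_RX_RING/PACKET_TX_RING. A single mmap() at offset 0 covers
 * everything, RX ring first, which is why packet_mmap() above sums the
 * expected sizes. "req" is assumed to be the tpacket_req used to
 * create the RX ring.
 *
 *   #include <sys/mman.h>
 *
 *   size_t rx_size = (size_t)req.tp_block_size * req.tp_block_nr;
 *   char *ring = mmap(NULL, rx_size, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *   // With both rings configured, map rx_size + tx_size instead;
 *   // the TX ring then follows the RX ring in the mapping.
 */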
4435 
4436 static const struct proto_ops packet_ops_spkt = {
4437         .family =       PF_PACKET,
4438         .owner =        THIS_MODULE,
4439         .release =      packet_release,
4440         .bind =         packet_bind_spkt,
4441         .connect =      sock_no_connect,
4442         .socketpair =   sock_no_socketpair,
4443         .accept =       sock_no_accept,
4444         .getname =      packet_getname_spkt,
4445         .poll =         datagram_poll,
4446         .ioctl =        packet_ioctl,
4447         .listen =       sock_no_listen,
4448         .shutdown =     sock_no_shutdown,
4449         .setsockopt =   sock_no_setsockopt,
4450         .getsockopt =   sock_no_getsockopt,
4451         .sendmsg =      packet_sendmsg_spkt,
4452         .recvmsg =      packet_recvmsg,
4453         .mmap =         sock_no_mmap,
4454         .sendpage =     sock_no_sendpage,
4455 };
4456 
4457 static const struct proto_ops packet_ops = {
4458         .family =       PF_PACKET,
4459         .owner =        THIS_MODULE,
4460         .release =      packet_release,
4461         .bind =         packet_bind,
4462         .connect =      sock_no_connect,
4463         .socketpair =   sock_no_socketpair,
4464         .accept =       sock_no_accept,
4465         .getname =      packet_getname,
4466         .poll =         packet_poll,
4467         .ioctl =        packet_ioctl,
4468         .listen =       sock_no_listen,
4469         .shutdown =     sock_no_shutdown,
4470         .setsockopt =   packet_setsockopt,
4471         .getsockopt =   packet_getsockopt,
4472 #ifdef CONFIG_COMPAT
4473         .compat_setsockopt = compat_packet_setsockopt,
4474 #endif
4475         .sendmsg =      packet_sendmsg,
4476         .recvmsg =      packet_recvmsg,
4477         .mmap =         packet_mmap,
4478         .sendpage =     sock_no_sendpage,
4479 };
4480 
4481 static const struct net_proto_family packet_family_ops = {
4482         .family =       PF_PACKET,
4483         .create =       packet_create,
4484         .owner  =       THIS_MODULE,
4485 };
4486 
4487 static struct notifier_block packet_netdev_notifier = {
4488         .notifier_call =        packet_notifier,
4489 };
4490 
4491 #ifdef CONFIG_PROC_FS
4492 
4493 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4494         __acquires(RCU)
4495 {
4496         struct net *net = seq_file_net(seq);
4497 
4498         rcu_read_lock();
4499         return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4500 }
4501 
4502 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4503 {
4504         struct net *net = seq_file_net(seq);
4505         return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4506 }
4507 
4508 static void packet_seq_stop(struct seq_file *seq, void *v)
4509         __releases(RCU)
4510 {
4511         rcu_read_unlock();
4512 }
4513 
4514 static int packet_seq_show(struct seq_file *seq, void *v)
4515 {
4516         if (v == SEQ_START_TOKEN)
4517                 seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4518         else {
4519                 struct sock *s = sk_entry(v);
4520                 const struct packet_sock *po = pkt_sk(s);
4521 
4522                 seq_printf(seq,
4523                            "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4524                            s,
4525                            refcount_read(&s->sk_refcnt),
4526                            s->sk_type,
4527                            ntohs(po->num),
4528                            po->ifindex,
4529                            po->running,
4530                            atomic_read(&s->sk_rmem_alloc),
4531                            from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4532                            sock_i_ino(s));
4533         }
4534 
4535         return 0;
4536 }
4537 
4538 static const struct seq_operations packet_seq_ops = {
4539         .start  = packet_seq_start,
4540         .next   = packet_seq_next,
4541         .stop   = packet_seq_stop,
4542         .show   = packet_seq_show,
4543 };
4544 
4545 static int packet_seq_open(struct inode *inode, struct file *file)
4546 {
4547         return seq_open_net(inode, file, &packet_seq_ops,
4548                             sizeof(struct seq_net_private));
4549 }
4550 
4551 static const struct file_operations packet_seq_fops = {
4552         .owner          = THIS_MODULE,
4553         .open           = packet_seq_open,
4554         .read           = seq_read,
4555         .llseek         = seq_lseek,
4556         .release        = seq_release_net,
4557 };
4558 
4559 #endif
4560 
4561 static int __net_init packet_net_init(struct net *net)
4562 {
4563         mutex_init(&net->packet.sklist_lock);
4564         INIT_HLIST_HEAD(&net->packet.sklist);
4565 
4566         if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4567                 return -ENOMEM;
4568 
4569         return 0;
4570 }
4571 
4572 static void __net_exit packet_net_exit(struct net *net)
4573 {
4574         remove_proc_entry("packet", net->proc_net);
4575 }
4576 
4577 static struct pernet_operations packet_net_ops = {
4578         .init = packet_net_init,
4579         .exit = packet_net_exit,
4580 };
4581 
4582 
4583 static void __exit packet_exit(void)
4584 {
4585         unregister_netdevice_notifier(&packet_netdev_notifier);
4586         unregister_pernet_subsys(&packet_net_ops);
4587         sock_unregister(PF_PACKET);
4588         proto_unregister(&packet_proto);
4589 }
4590 
4591 static int __init packet_init(void)
4592 {
4593         int rc = proto_register(&packet_proto, 0);
4594 
4595         if (rc != 0)
4596                 goto out;
4597 
4598         sock_register(&packet_family_ops);
4599         register_pernet_subsys(&packet_net_ops);
4600         register_netdevice_notifier(&packet_netdev_notifier);
4601 out:
4602         return rc;
4603 }
4604 
4605 module_init(packet_init);
4606 module_exit(packet_exit);
4607 MODULE_LICENSE("GPL");
4608 MODULE_ALIAS_NETPROTO(PF_PACKET);
4609 
