
TOMOYO Linux Cross Reference
Linux/include/net/sock.h


  1 /*
  2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
  3  *              operating system.  INET is implemented using the  BSD Socket
  4  *              interface as the means of communication with the user level.
  5  *
  6  *              Definitions for the AF_INET socket handler.
  7  *
  8  * Version:     @(#)sock.h      1.0.4   05/13/93
  9  *
 10  * Authors:     Ross Biro
 11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 12  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 13  *              Florian La Roche <flla@stud.uni-sb.de>
 14  *
 15  * Fixes:
 16  *              Alan Cox        :       Volatiles in skbuff pointers. See
 17  *                                      skbuff comments. May be overdone,
 18  *                                      better to prove they can be removed
 19  *                                      than the reverse.
 20  *              Alan Cox        :       Added a zapped field for tcp to note
 21  *                                      a socket is reset and must stay shut up
 22  *              Alan Cox        :       New fields for options
 23  *      Pauline Middelink       :       identd support
 24  *              Alan Cox        :       Eliminate low level recv/recvfrom
 25  *              David S. Miller :       New socket lookup architecture.
 26  *              Steve Whitehouse:       Default routines for sock_ops
 27  *              Arnaldo C. Melo :       removed net_pinfo, tp_pinfo and made
 28  *                                      protinfo be just a void pointer, as the
 29  *                                      protocol specific parts were moved to
 30  *                                      respective headers and ipv4/v6, etc now
  31  *                                      use private slabcaches for their socks
 32  *              Pedro Hortas    :       New flags field for socket options
 33  *
 34  *
 35  *              This program is free software; you can redistribute it and/or
 36  *              modify it under the terms of the GNU General Public License
 37  *              as published by the Free Software Foundation; either version
 38  *              2 of the License, or (at your option) any later version.
 39  */
 40 #ifndef _SOCK_H
 41 #define _SOCK_H
 42 
 43 #include <linux/hardirq.h>
 44 #include <linux/kernel.h>
 45 #include <linux/list.h>
 46 #include <linux/list_nulls.h>
 47 #include <linux/timer.h>
 48 #include <linux/cache.h>
 49 #include <linux/bitops.h>
 50 #include <linux/lockdep.h>
 51 #include <linux/netdevice.h>
 52 #include <linux/skbuff.h>       /* struct sk_buff */
 53 #include <linux/mm.h>
 54 #include <linux/security.h>
 55 #include <linux/slab.h>
 56 #include <linux/uaccess.h>
 57 #include <linux/page_counter.h>
 58 #include <linux/memcontrol.h>
 59 #include <linux/static_key.h>
 60 #include <linux/sched.h>
 61 #include <linux/wait.h>
 62 #include <linux/cgroup-defs.h>
 63 #include <linux/rbtree.h>
 64 #include <linux/filter.h>
 65 #include <linux/rculist_nulls.h>
 66 #include <linux/poll.h>
 67 
 68 #include <linux/atomic.h>
 69 #include <linux/refcount.h>
 70 #include <net/dst.h>
 71 #include <net/checksum.h>
 72 #include <net/tcp_states.h>
 73 #include <linux/net_tstamp.h>
 74 #include <net/smc.h>
 75 #include <net/l3mdev.h>
 76 
 77 /*
 78  * This structure really needs to be cleaned up.
 79  * Most of it is for TCP, and not used by any of
 80  * the other protocols.
 81  */
 82 
 83 /* Define this to get the SOCK_DBG debugging facility. */
 84 #define SOCK_DEBUGGING
 85 #ifdef SOCK_DEBUGGING
 86 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
 87                                         printk(KERN_DEBUG msg); } while (0)
 88 #else
 89 /* Validate arguments and do nothing */
 90 static inline __printf(2, 3)
 91 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
 92 {
 93 }
 94 #endif
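/* Illustrative use (editor's sketch, not part of this header): with
 * SOCK_DEBUGGING defined and the SOCK_DBG flag set on the socket (via the
 * SO_DEBUG socket option), the message reaches the kernel log; otherwise
 * the inline stub only validates the format arguments.
 *
 *      SOCK_DEBUG(sk, "rcvbuf grew to %d\n", sk->sk_rcvbuf);
 */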
 95 
 96 /* This is the per-socket lock.  The spinlock provides a synchronization
 97  * between user contexts and software interrupt processing, whereas the
 98  * mini-semaphore synchronizes multiple users amongst themselves.
 99  */
100 typedef struct {
101         spinlock_t              slock;
102         int                     owned;
103         wait_queue_head_t       wq;
104         /*
105          * We express the mutex-alike socket_lock semantics
106          * to the lock validator by explicitly managing
107          * the slock as a lock variant (in addition to
108          * the slock itself):
109          */
110 #ifdef CONFIG_DEBUG_LOCK_ALLOC
111         struct lockdep_map dep_map;
112 #endif
113 } socket_lock_t;
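/* Editor's sketch of the typical split between the two halves (caller
 * pattern assumed, not defined here): process context takes the
 * mini-semaphore with lock_sock()/release_sock(), while softirq paths
 * take only the spinlock via bh_lock_sock()/bh_unlock_sock() and fall
 * back to the backlog queue when the owned flag is set.
 *
 *      lock_sock(sk);           // process context, may sleep
 *      ...update socket state...
 *      release_sock(sk);        // also replays the backlog queue
 */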
114 
115 struct sock;
116 struct proto;
117 struct net;
118 
119 typedef __u32 __bitwise __portpair;
120 typedef __u64 __bitwise __addrpair;
121 
122 /**
123  *      struct sock_common - minimal network layer representation of sockets
124  *      @skc_daddr: Foreign IPv4 addr
125  *      @skc_rcv_saddr: Bound local IPv4 addr
126  *      @skc_hash: hash value used with various protocol lookup tables
127  *      @skc_u16hashes: two u16 hash values used by UDP lookup tables
128  *      @skc_dport: placeholder for inet_dport/tw_dport
129  *      @skc_num: placeholder for inet_num/tw_num
130  *      @skc_family: network address family
131  *      @skc_state: Connection state
132  *      @skc_reuse: %SO_REUSEADDR setting
133  *      @skc_reuseport: %SO_REUSEPORT setting
134  *      @skc_bound_dev_if: bound device index if != 0
135  *      @skc_bind_node: bind hash linkage for various protocol lookup tables
136  *      @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
137  *      @skc_prot: protocol handlers inside a network family
138  *      @skc_net: reference to the network namespace of this socket
139  *      @skc_node: main hash linkage for various protocol lookup tables
140  *      @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
141  *      @skc_tx_queue_mapping: tx queue number for this connection
142  *      @skc_rx_queue_mapping: rx queue number for this connection
143  *      @skc_flags: place holder for sk_flags
144  *              %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
145  *              %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
146  *      @skc_incoming_cpu: record/match cpu processing incoming packets
147  *      @skc_refcnt: reference count
148  *
149  *      This is the minimal network layer representation of sockets, the header
150  *      for struct sock and struct inet_timewait_sock.
151  */
152 struct sock_common {
153         /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
154          * address on 64bit arches : cf INET_MATCH()
155          */
156         union {
157                 __addrpair      skc_addrpair;
158                 struct {
159                         __be32  skc_daddr;
160                         __be32  skc_rcv_saddr;
161                 };
162         };
163         union  {
164                 unsigned int    skc_hash;
165                 __u16           skc_u16hashes[2];
166         };
167         /* skc_dport && skc_num must be grouped as well */
168         union {
169                 __portpair      skc_portpair;
170                 struct {
171                         __be16  skc_dport;
172                         __u16   skc_num;
173                 };
174         };
175 
176         unsigned short          skc_family;
177         volatile unsigned char  skc_state;
178         unsigned char           skc_reuse:4;
179         unsigned char           skc_reuseport:1;
180         unsigned char           skc_ipv6only:1;
181         unsigned char           skc_net_refcnt:1;
182         int                     skc_bound_dev_if;
183         union {
184                 struct hlist_node       skc_bind_node;
185                 struct hlist_node       skc_portaddr_node;
186         };
187         struct proto            *skc_prot;
188         possible_net_t          skc_net;
189 
190 #if IS_ENABLED(CONFIG_IPV6)
191         struct in6_addr         skc_v6_daddr;
192         struct in6_addr         skc_v6_rcv_saddr;
193 #endif
194 
195         atomic64_t              skc_cookie;
196 
197         /* following fields are padding to force
198          * offset(struct sock, sk_refcnt) == 128 on 64bit arches
199          * assuming IPV6 is enabled. We use this padding differently
 200          * for different kinds of 'sockets'
201          */
202         union {
203                 unsigned long   skc_flags;
204                 struct sock     *skc_listener; /* request_sock */
205                 struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
206         };
207         /*
208          * fields between dontcopy_begin/dontcopy_end
209          * are not copied in sock_copy()
210          */
211         /* private: */
212         int                     skc_dontcopy_begin[0];
213         /* public: */
214         union {
215                 struct hlist_node       skc_node;
216                 struct hlist_nulls_node skc_nulls_node;
217         };
218         unsigned short          skc_tx_queue_mapping;
219 #ifdef CONFIG_XPS
220         unsigned short          skc_rx_queue_mapping;
221 #endif
222         union {
223                 int             skc_incoming_cpu;
224                 u32             skc_rcv_wnd;
225                 u32             skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
226         };
227 
228         refcount_t              skc_refcnt;
229         /* private: */
230         int                     skc_dontcopy_end[0];
231         union {
232                 u32             skc_rxhash;
233                 u32             skc_window_clamp;
234                 u32             skc_tw_snd_nxt; /* struct tcp_timewait_sock */
235         };
236         /* public: */
237 };
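/* Editor's note (illustrative, based only on the layout above): because
 * skc_daddr and skc_rcv_saddr share one 8-byte aligned union, an IPv4
 * lookup can compare both addresses with a single 64-bit load, e.g.
 *
 *      __addrpair want = ...;                         // daddr + rcv_saddr packed
 *      bool hit = (sk->__sk_common.skc_addrpair == want);
 *
 * which is the comparison trick INET_MATCH() relies on for 64-bit arches.
 */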
238 
239 /**
240   *     struct sock - network layer representation of sockets
241   *     @__sk_common: shared layout with inet_timewait_sock
242   *     @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
243   *     @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
244   *     @sk_lock:       synchronizer
245   *     @sk_kern_sock: True if sock is using kernel lock classes
246   *     @sk_rcvbuf: size of receive buffer in bytes
247   *     @sk_wq: sock wait queue and async head
248   *     @sk_rx_dst: receive input route used by early demux
249   *     @sk_dst_cache: destination cache
250   *     @sk_dst_pending_confirm: need to confirm neighbour
251   *     @sk_policy: flow policy
252   *     @sk_receive_queue: incoming packets
253   *     @sk_wmem_alloc: transmit queue bytes committed
254   *     @sk_tsq_flags: TCP Small Queues flags
255   *     @sk_write_queue: Packet sending queue
256   *     @sk_omem_alloc: "o" is "option" or "other"
257   *     @sk_wmem_queued: persistent queue size
258   *     @sk_forward_alloc: space allocated forward
259   *     @sk_napi_id: id of the last napi context to receive data for sk
260   *     @sk_ll_usec: usecs to busypoll when there is no data
261   *     @sk_allocation: allocation mode
262   *     @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
263   *     @sk_pacing_status: Pacing status (requested, handled by sch_fq)
264   *     @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
265   *     @sk_sndbuf: size of send buffer in bytes
266   *     @__sk_flags_offset: empty field used to determine location of bitfield
267   *     @sk_padding: unused element for alignment
268   *     @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
269   *     @sk_no_check_rx: allow zero checksum in RX packets
270   *     @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
271   *     @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
272   *     @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
273   *     @sk_gso_max_size: Maximum GSO segment size to build
274   *     @sk_gso_max_segs: Maximum number of GSO segments
275   *     @sk_pacing_shift: scaling factor for TCP Small Queues
276   *     @sk_lingertime: %SO_LINGER l_linger setting
277   *     @sk_backlog: always used with the per-socket spinlock held
278   *     @sk_callback_lock: used with the callbacks in the end of this struct
279   *     @sk_error_queue: rarely used
280   *     @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
281   *                       IPV6_ADDRFORM for instance)
282   *     @sk_err: last error
283   *     @sk_err_soft: errors that don't cause failure but are the cause of a
284   *                   persistent failure not just 'timed out'
285   *     @sk_drops: raw/udp drops counter
286   *     @sk_ack_backlog: current listen backlog
287   *     @sk_max_ack_backlog: listen backlog set in listen()
288   *     @sk_uid: user id of owner
289   *     @sk_priority: %SO_PRIORITY setting
290   *     @sk_type: socket type (%SOCK_STREAM, etc)
291   *     @sk_protocol: which protocol this socket belongs in this network family
292   *     @sk_peer_pid: &struct pid for this socket's peer
293   *     @sk_peer_cred: %SO_PEERCRED setting
294   *     @sk_rcvlowat: %SO_RCVLOWAT setting
295   *     @sk_rcvtimeo: %SO_RCVTIMEO setting
296   *     @sk_sndtimeo: %SO_SNDTIMEO setting
297   *     @sk_txhash: computed flow hash for use on transmit
298   *     @sk_filter: socket filtering instructions
299   *     @sk_timer: sock cleanup timer
300   *     @sk_stamp: time stamp of last packet received
301   *     @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
302   *     @sk_tsflags: SO_TIMESTAMPING socket options
303   *     @sk_tskey: counter to disambiguate concurrent tstamp requests
304   *     @sk_zckey: counter to order MSG_ZEROCOPY notifications
305   *     @sk_socket: Identd and reporting IO signals
306   *     @sk_user_data: RPC layer private data
307   *     @sk_frag: cached page frag
308   *     @sk_peek_off: current peek_offset value
309   *     @sk_send_head: front of stuff to transmit
310   *     @sk_security: used by security modules
311   *     @sk_mark: generic packet mark
312   *     @sk_cgrp_data: cgroup data for this cgroup
313   *     @sk_memcg: this socket's memory cgroup association
314   *     @sk_write_pending: a write to stream socket waits to start
315   *     @sk_state_change: callback to indicate change in the state of the sock
316   *     @sk_data_ready: callback to indicate there is data to be processed
 317   *     @sk_write_space: callback to indicate there is buffer sending space available
318   *     @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
319   *     @sk_backlog_rcv: callback to process the backlog
320   *     @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
321   *     @sk_reuseport_cb: reuseport group container
322   *     @sk_rcu: used during RCU grace period
323   *     @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
324   *     @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
325   *     @sk_txtime_unused: unused txtime flags
326   */
327 struct sock {
328         /*
329          * Now struct inet_timewait_sock also uses sock_common, so please just
 330          * don't add anything before this first member (__sk_common) --acme
331          */
332         struct sock_common      __sk_common;
333 #define sk_node                 __sk_common.skc_node
334 #define sk_nulls_node           __sk_common.skc_nulls_node
335 #define sk_refcnt               __sk_common.skc_refcnt
336 #define sk_tx_queue_mapping     __sk_common.skc_tx_queue_mapping
337 #ifdef CONFIG_XPS
338 #define sk_rx_queue_mapping     __sk_common.skc_rx_queue_mapping
339 #endif
340 
341 #define sk_dontcopy_begin       __sk_common.skc_dontcopy_begin
342 #define sk_dontcopy_end         __sk_common.skc_dontcopy_end
343 #define sk_hash                 __sk_common.skc_hash
344 #define sk_portpair             __sk_common.skc_portpair
345 #define sk_num                  __sk_common.skc_num
346 #define sk_dport                __sk_common.skc_dport
347 #define sk_addrpair             __sk_common.skc_addrpair
348 #define sk_daddr                __sk_common.skc_daddr
349 #define sk_rcv_saddr            __sk_common.skc_rcv_saddr
350 #define sk_family               __sk_common.skc_family
351 #define sk_state                __sk_common.skc_state
352 #define sk_reuse                __sk_common.skc_reuse
353 #define sk_reuseport            __sk_common.skc_reuseport
354 #define sk_ipv6only             __sk_common.skc_ipv6only
355 #define sk_net_refcnt           __sk_common.skc_net_refcnt
356 #define sk_bound_dev_if         __sk_common.skc_bound_dev_if
357 #define sk_bind_node            __sk_common.skc_bind_node
358 #define sk_prot                 __sk_common.skc_prot
359 #define sk_net                  __sk_common.skc_net
360 #define sk_v6_daddr             __sk_common.skc_v6_daddr
361 #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
362 #define sk_cookie               __sk_common.skc_cookie
363 #define sk_incoming_cpu         __sk_common.skc_incoming_cpu
364 #define sk_flags                __sk_common.skc_flags
365 #define sk_rxhash               __sk_common.skc_rxhash
366 
367         socket_lock_t           sk_lock;
368         atomic_t                sk_drops;
369         int                     sk_rcvlowat;
370         struct sk_buff_head     sk_error_queue;
371         struct sk_buff_head     sk_receive_queue;
372         /*
373          * The backlog queue is special, it is always used with
374          * the per-socket spinlock held and requires low latency
 375          * access. Therefore we special case its implementation.
 376          * Note : rmem_alloc is in this structure to fill a hole
 377          * on 64bit arches, not because it's logically part of
378          * backlog.
379          */
380         struct {
381                 atomic_t        rmem_alloc;
382                 int             len;
383                 struct sk_buff  *head;
384                 struct sk_buff  *tail;
385         } sk_backlog;
386 #define sk_rmem_alloc sk_backlog.rmem_alloc
387 
388         int                     sk_forward_alloc;
389 #ifdef CONFIG_NET_RX_BUSY_POLL
390         unsigned int            sk_ll_usec;
391         /* ===== mostly read cache line ===== */
392         unsigned int            sk_napi_id;
393 #endif
394         int                     sk_rcvbuf;
395 
396         struct sk_filter __rcu  *sk_filter;
397         union {
398                 struct socket_wq __rcu  *sk_wq;
399                 struct socket_wq        *sk_wq_raw;
400         };
401 #ifdef CONFIG_XFRM
402         struct xfrm_policy __rcu *sk_policy[2];
403 #endif
404         struct dst_entry        *sk_rx_dst;
405         struct dst_entry __rcu  *sk_dst_cache;
406         atomic_t                sk_omem_alloc;
407         int                     sk_sndbuf;
408 
409         /* ===== cache line for TX ===== */
410         int                     sk_wmem_queued;
411         refcount_t              sk_wmem_alloc;
412         unsigned long           sk_tsq_flags;
413         union {
414                 struct sk_buff  *sk_send_head;
415                 struct rb_root  tcp_rtx_queue;
416         };
417         struct sk_buff_head     sk_write_queue;
418         __s32                   sk_peek_off;
419         int                     sk_write_pending;
420         __u32                   sk_dst_pending_confirm;
421         u32                     sk_pacing_status; /* see enum sk_pacing */
422         long                    sk_sndtimeo;
423         struct timer_list       sk_timer;
424         __u32                   sk_priority;
425         __u32                   sk_mark;
426         unsigned long           sk_pacing_rate; /* bytes per second */
427         unsigned long           sk_max_pacing_rate;
428         struct page_frag        sk_frag;
429         netdev_features_t       sk_route_caps;
430         netdev_features_t       sk_route_nocaps;
431         netdev_features_t       sk_route_forced_caps;
432         int                     sk_gso_type;
433         unsigned int            sk_gso_max_size;
434         gfp_t                   sk_allocation;
435         __u32                   sk_txhash;
436 
437         /*
438          * Because of non atomicity rules, all
439          * changes are protected by socket lock.
440          */
441         unsigned int            __sk_flags_offset[0];
442 #ifdef __BIG_ENDIAN_BITFIELD
443 #define SK_FL_PROTO_SHIFT  16
444 #define SK_FL_PROTO_MASK   0x00ff0000
445 
446 #define SK_FL_TYPE_SHIFT   0
447 #define SK_FL_TYPE_MASK    0x0000ffff
448 #else
449 #define SK_FL_PROTO_SHIFT  8
450 #define SK_FL_PROTO_MASK   0x0000ff00
451 
452 #define SK_FL_TYPE_SHIFT   16
453 #define SK_FL_TYPE_MASK    0xffff0000
454 #endif
455 
456         unsigned int            sk_padding : 1,
457                                 sk_kern_sock : 1,
458                                 sk_no_check_tx : 1,
459                                 sk_no_check_rx : 1,
460                                 sk_userlocks : 4,
461                                 sk_protocol  : 8,
462                                 sk_type      : 16;
463 #define SK_PROTOCOL_MAX U8_MAX
464         u16                     sk_gso_max_segs;
465         u8                      sk_pacing_shift;
466         unsigned long           sk_lingertime;
467         struct proto            *sk_prot_creator;
468         rwlock_t                sk_callback_lock;
469         int                     sk_err,
470                                 sk_err_soft;
471         u32                     sk_ack_backlog;
472         u32                     sk_max_ack_backlog;
473         kuid_t                  sk_uid;
474         struct pid              *sk_peer_pid;
475         const struct cred       *sk_peer_cred;
476         long                    sk_rcvtimeo;
477         ktime_t                 sk_stamp;
478 #if BITS_PER_LONG==32
479         seqlock_t               sk_stamp_seq;
480 #endif
481         u16                     sk_tsflags;
482         u8                      sk_shutdown;
483         u32                     sk_tskey;
484         atomic_t                sk_zckey;
485 
486         u8                      sk_clockid;
487         u8                      sk_txtime_deadline_mode : 1,
488                                 sk_txtime_report_errors : 1,
489                                 sk_txtime_unused : 6;
490 
491         struct socket           *sk_socket;
492         void                    *sk_user_data;
493 #ifdef CONFIG_SECURITY
494         void                    *sk_security;
495 #endif
496         struct sock_cgroup_data sk_cgrp_data;
497         struct mem_cgroup       *sk_memcg;
498         void                    (*sk_state_change)(struct sock *sk);
499         void                    (*sk_data_ready)(struct sock *sk);
500         void                    (*sk_write_space)(struct sock *sk);
501         void                    (*sk_error_report)(struct sock *sk);
502         int                     (*sk_backlog_rcv)(struct sock *sk,
503                                                   struct sk_buff *skb);
504 #ifdef CONFIG_SOCK_VALIDATE_XMIT
505         struct sk_buff*         (*sk_validate_xmit_skb)(struct sock *sk,
506                                                         struct net_device *dev,
507                                                         struct sk_buff *skb);
508 #endif
509         void                    (*sk_destruct)(struct sock *sk);
510         struct sock_reuseport __rcu     *sk_reuseport_cb;
511         struct rcu_head         sk_rcu;
512 };
513 
514 enum sk_pacing {
515         SK_PACING_NONE          = 0,
516         SK_PACING_NEEDED        = 1,
517         SK_PACING_FQ            = 2,
518 };
519 
520 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
521 
522 #define rcu_dereference_sk_user_data(sk)        rcu_dereference(__sk_user_data((sk)))
523 #define rcu_assign_sk_user_data(sk, ptr)        rcu_assign_pointer(__sk_user_data((sk)), ptr)
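/* Editor's sketch of the intended usage (struct my_ctx is hypothetical):
 * an in-kernel user publishes private state with the assign helper and
 * readers dereference it under rcu_read_lock().
 *
 *      rcu_assign_sk_user_data(sk, ctx);              // publish
 *
 *      rcu_read_lock();
 *      struct my_ctx *c = rcu_dereference_sk_user_data(sk);
 *      if (c)
 *              use(c);
 *      rcu_read_unlock();
 */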
524 
525 /*
 526  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean whether or not it is OK
 527  * for its port to be reused by someone else. SK_FORCE_REUSE
528  * on a socket means that the socket will reuse everybody else's port
529  * without looking at the other's sk_reuse value.
530  */
531 
532 #define SK_NO_REUSE     0
533 #define SK_CAN_REUSE    1
534 #define SK_FORCE_REUSE  2
535 
536 int sk_set_peek_off(struct sock *sk, int val);
537 
538 static inline int sk_peek_offset(struct sock *sk, int flags)
539 {
540         if (unlikely(flags & MSG_PEEK)) {
541                 return READ_ONCE(sk->sk_peek_off);
542         }
543 
544         return 0;
545 }
546 
547 static inline void sk_peek_offset_bwd(struct sock *sk, int val)
548 {
549         s32 off = READ_ONCE(sk->sk_peek_off);
550 
551         if (unlikely(off >= 0)) {
552                 off = max_t(s32, off - val, 0);
553                 WRITE_ONCE(sk->sk_peek_off, off);
554         }
555 }
556 
557 static inline void sk_peek_offset_fwd(struct sock *sk, int val)
558 {
559         sk_peek_offset_bwd(sk, -val);
560 }
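/* Worked example (editor's illustration): with SO_PEEK_OFF enabled and
 * sk_peek_off at 0, a recv(..., MSG_PEEK) of 100 bytes ends with
 * sk_peek_offset_fwd(sk, 100), so the next peek starts 100 bytes in.
 * A normal read that then consumes 60 bytes calls
 * sk_peek_offset_bwd(sk, 60), clamping the offset back to 40 so peeking
 * stays aligned with the data still queued.
 */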
561 
562 /*
563  * Hashed lists helper routines
564  */
565 static inline struct sock *sk_entry(const struct hlist_node *node)
566 {
567         return hlist_entry(node, struct sock, sk_node);
568 }
569 
570 static inline struct sock *__sk_head(const struct hlist_head *head)
571 {
572         return hlist_entry(head->first, struct sock, sk_node);
573 }
574 
575 static inline struct sock *sk_head(const struct hlist_head *head)
576 {
577         return hlist_empty(head) ? NULL : __sk_head(head);
578 }
579 
580 static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
581 {
582         return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
583 }
584 
585 static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
586 {
587         return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
588 }
589 
590 static inline struct sock *sk_next(const struct sock *sk)
591 {
592         return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
593 }
594 
595 static inline struct sock *sk_nulls_next(const struct sock *sk)
596 {
597         return (!is_a_nulls(sk->sk_nulls_node.next)) ?
598                 hlist_nulls_entry(sk->sk_nulls_node.next,
599                                   struct sock, sk_nulls_node) :
600                 NULL;
601 }
602 
603 static inline bool sk_unhashed(const struct sock *sk)
604 {
605         return hlist_unhashed(&sk->sk_node);
606 }
607 
608 static inline bool sk_hashed(const struct sock *sk)
609 {
610         return !sk_unhashed(sk);
611 }
612 
613 static inline void sk_node_init(struct hlist_node *node)
614 {
615         node->pprev = NULL;
616 }
617 
618 static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
619 {
620         node->pprev = NULL;
621 }
622 
623 static inline void __sk_del_node(struct sock *sk)
624 {
625         __hlist_del(&sk->sk_node);
626 }
627 
628 /* NB: equivalent to hlist_del_init_rcu */
629 static inline bool __sk_del_node_init(struct sock *sk)
630 {
631         if (sk_hashed(sk)) {
632                 __sk_del_node(sk);
633                 sk_node_init(&sk->sk_node);
634                 return true;
635         }
636         return false;
637 }
638 
639 /* Grab socket reference count. This operation is valid only
 640    when sk is ALREADY grabbed, e.g. it is found in a hash table
641    or a list and the lookup is made under lock preventing hash table
642    modifications.
643  */
644 
645 static __always_inline void sock_hold(struct sock *sk)
646 {
647         refcount_inc(&sk->sk_refcnt);
648 }
649 
650 /* Ungrab socket in the context, which assumes that socket refcnt
 651    cannot hit zero, e.g. it is true in the context of any socketcall.
652  */
653 static __always_inline void __sock_put(struct sock *sk)
654 {
655         refcount_dec(&sk->sk_refcnt);
656 }
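/* Editor's sketch of the lookup pattern these helpers assume (the bucket
 * walk is hypothetical): the extra reference is taken while the hash
 * bucket lock still pins the socket, and dropped with sock_put() when the
 * caller is done.
 *
 *      spin_lock(&bucket->lock);
 *      sk = find_in_bucket(bucket, key);      // hypothetical lookup
 *      if (sk)
 *              sock_hold(sk);                 // refcnt already non-zero here
 *      spin_unlock(&bucket->lock);
 *      ...use sk...
 *      sock_put(sk);                          // full release, may free sk
 */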
657 
658 static inline bool sk_del_node_init(struct sock *sk)
659 {
660         bool rc = __sk_del_node_init(sk);
661 
662         if (rc) {
663                 /* paranoid for a while -acme */
664                 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
665                 __sock_put(sk);
666         }
667         return rc;
668 }
669 #define sk_del_node_init_rcu(sk)        sk_del_node_init(sk)
670 
671 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
672 {
673         if (sk_hashed(sk)) {
674                 hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
675                 return true;
676         }
677         return false;
678 }
679 
680 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
681 {
682         bool rc = __sk_nulls_del_node_init_rcu(sk);
683 
684         if (rc) {
685                 /* paranoid for a while -acme */
686                 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
687                 __sock_put(sk);
688         }
689         return rc;
690 }
691 
692 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
693 {
694         hlist_add_head(&sk->sk_node, list);
695 }
696 
697 static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
698 {
699         sock_hold(sk);
700         __sk_add_node(sk, list);
701 }
702 
703 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
704 {
705         sock_hold(sk);
706         if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
707             sk->sk_family == AF_INET6)
708                 hlist_add_tail_rcu(&sk->sk_node, list);
709         else
710                 hlist_add_head_rcu(&sk->sk_node, list);
711 }
712 
713 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
714 {
715         hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
716 }
717 
718 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
719 {
720         sock_hold(sk);
721         __sk_nulls_add_node_rcu(sk, list);
722 }
723 
724 static inline void __sk_del_bind_node(struct sock *sk)
725 {
726         __hlist_del(&sk->sk_bind_node);
727 }
728 
729 static inline void sk_add_bind_node(struct sock *sk,
730                                         struct hlist_head *list)
731 {
732         hlist_add_head(&sk->sk_bind_node, list);
733 }
734 
735 #define sk_for_each(__sk, list) \
736         hlist_for_each_entry(__sk, list, sk_node)
737 #define sk_for_each_rcu(__sk, list) \
738         hlist_for_each_entry_rcu(__sk, list, sk_node)
739 #define sk_nulls_for_each(__sk, node, list) \
740         hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
741 #define sk_nulls_for_each_rcu(__sk, node, list) \
742         hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
743 #define sk_for_each_from(__sk) \
744         hlist_for_each_entry_from(__sk, sk_node)
745 #define sk_nulls_for_each_from(__sk, node) \
746         if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
747                 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
748 #define sk_for_each_safe(__sk, tmp, list) \
749         hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
750 #define sk_for_each_bound(__sk, list) \
751         hlist_for_each_entry(__sk, list, sk_bind_node)
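/* Editor's sketch (hypothetical walker, not from this header): iterating
 * a protocol hash chain with the locked and the RCU variants.
 *
 *      struct sock *sk;
 *
 *      sk_for_each(sk, &head)                 // caller holds the chain lock
 *              if (sk->sk_num == port)
 *                      found = sk;
 *
 *      rcu_read_lock();
 *      sk_for_each_rcu(sk, &head)             // lockless readers
 *              ...
 *      rcu_read_unlock();
 */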
752 
753 /**
754  * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
755  * @tpos:       the type * to use as a loop cursor.
756  * @pos:        the &struct hlist_node to use as a loop cursor.
757  * @head:       the head for your list.
758  * @offset:     offset of hlist_node within the struct.
759  *
760  */
761 #define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)                  \
762         for (pos = rcu_dereference(hlist_first_rcu(head));                     \
763              pos != NULL &&                                                    \
764                 ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
765              pos = rcu_dereference(hlist_next_rcu(pos)))
766 
767 static inline struct user_namespace *sk_user_ns(struct sock *sk)
768 {
769         /* Careful only use this in a context where these parameters
770          * can not change and must all be valid, such as recvmsg from
771          * userspace.
772          */
773         return sk->sk_socket->file->f_cred->user_ns;
774 }
775 
776 /* Sock flags */
777 enum sock_flags {
778         SOCK_DEAD,
779         SOCK_DONE,
780         SOCK_URGINLINE,
781         SOCK_KEEPOPEN,
782         SOCK_LINGER,
783         SOCK_DESTROY,
784         SOCK_BROADCAST,
785         SOCK_TIMESTAMP,
786         SOCK_ZAPPED,
787         SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
788         SOCK_DBG, /* %SO_DEBUG setting */
789         SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
790         SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
791         SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
792         SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
793         SOCK_MEMALLOC, /* VM depends on this socket for swapping */
794         SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
795         SOCK_FASYNC, /* fasync() active */
796         SOCK_RXQ_OVFL,
797         SOCK_ZEROCOPY, /* buffers from userspace */
798         SOCK_WIFI_STATUS, /* push wifi status to userspace */
799         SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
800                      * Will use last 4 bytes of packet sent from
801                      * user-space instead.
802                      */
803         SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
804         SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
805         SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
806         SOCK_TXTIME,
807         SOCK_XDP, /* XDP is attached */
808         SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
809 };
810 
811 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
812 
813 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
814 {
815         nsk->sk_flags = osk->sk_flags;
816 }
817 
818 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
819 {
820         __set_bit(flag, &sk->sk_flags);
821 }
822 
823 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
824 {
825         __clear_bit(flag, &sk->sk_flags);
826 }
827 
828 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
829 {
830         return test_bit(flag, &sk->sk_flags);
831 }
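/* Editor's illustration of the flag helpers above: the set/reset variants
 * use non-atomic __set_bit()/__clear_bit(), so callers are expected to
 * provide their own serialization (typically the socket lock).
 *
 *      if (!sock_flag(sk, SOCK_DEAD))
 *              sock_set_flag(sk, SOCK_DONE);  // under lock_sock()
 */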
832 
833 #ifdef CONFIG_NET
834 DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
835 static inline int sk_memalloc_socks(void)
836 {
837         return static_branch_unlikely(&memalloc_socks_key);
838 }
839 #else
840 
841 static inline int sk_memalloc_socks(void)
842 {
843         return 0;
844 }
845 
846 #endif
847 
848 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
849 {
850         return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
851 }
852 
853 static inline void sk_acceptq_removed(struct sock *sk)
854 {
855         sk->sk_ack_backlog--;
856 }
857 
858 static inline void sk_acceptq_added(struct sock *sk)
859 {
860         sk->sk_ack_backlog++;
861 }
862 
863 static inline bool sk_acceptq_is_full(const struct sock *sk)
864 {
865         return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
866 }
867 
868 /*
869  * Compute minimal free write space needed to queue new packets.
870  */
871 static inline int sk_stream_min_wspace(const struct sock *sk)
872 {
873         return sk->sk_wmem_queued >> 1;
874 }
875 
876 static inline int sk_stream_wspace(const struct sock *sk)
877 {
878         return sk->sk_sndbuf - sk->sk_wmem_queued;
879 }
880 
881 void sk_stream_write_space(struct sock *sk);
882 
883 /* OOB backlog add */
884 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
885 {
 886         /* don't let the skb dst be un-refcounted, we are going to leave the rcu lock */
887         skb_dst_force(skb);
888 
889         if (!sk->sk_backlog.tail)
890                 sk->sk_backlog.head = skb;
891         else
892                 sk->sk_backlog.tail->next = skb;
893 
894         sk->sk_backlog.tail = skb;
895         skb->next = NULL;
896 }
897 
898 /*
899  * Take into account size of receive queue and backlog queue
900  * Do not take into account this skb truesize,
901  * to allow even a single big packet to come.
902  */
903 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
904 {
905         unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
906 
907         return qsize > limit;
908 }
909 
910 /* The per-socket spinlock must be held here. */
911 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
912                                               unsigned int limit)
913 {
914         if (sk_rcvqueues_full(sk, limit))
915                 return -ENOBUFS;
916 
917         /*
918          * If the skb was allocated from pfmemalloc reserves, only
919          * allow SOCK_MEMALLOC sockets to use it as this socket is
920          * helping free memory
921          */
922         if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
923                 return -ENOMEM;
924 
925         __sk_add_backlog(sk, skb);
926         sk->sk_backlog.len += skb->truesize;
927         return 0;
928 }
929 
930 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
931 
932 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
933 {
934         if (sk_memalloc_socks() && skb_pfmemalloc(skb))
935                 return __sk_backlog_rcv(sk, skb);
936 
937         return sk->sk_backlog_rcv(sk, skb);
938 }
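/* Editor's sketch of the receive-path split these helpers serve
 * (simplified; protocol details omitted): softirq input either processes
 * the skb immediately or parks it on the backlog while a process owns
 * the socket, and release_sock() later replays it via sk_backlog_rcv().
 *
 *      bh_lock_sock(sk);
 *      if (!sock_owned_by_user(sk))
 *              ret = sk_backlog_rcv(sk, skb);         // process now
 *      else if (sk_add_backlog(sk, skb, limit))
 *              ret = -ENOBUFS;                        // backlog full, drop skb
 *      bh_unlock_sock(sk);
 */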
939 
940 static inline void sk_incoming_cpu_update(struct sock *sk)
941 {
942         int cpu = raw_smp_processor_id();
943 
944         if (unlikely(sk->sk_incoming_cpu != cpu))
945                 sk->sk_incoming_cpu = cpu;
946 }
947 
948 static inline void sock_rps_record_flow_hash(__u32 hash)
949 {
950 #ifdef CONFIG_RPS
951         struct rps_sock_flow_table *sock_flow_table;
952 
953         rcu_read_lock();
954         sock_flow_table = rcu_dereference(rps_sock_flow_table);
955         rps_record_sock_flow(sock_flow_table, hash);
956         rcu_read_unlock();
957 #endif
958 }
959 
960 static inline void sock_rps_record_flow(const struct sock *sk)
961 {
962 #ifdef CONFIG_RPS
963         if (static_key_false(&rfs_needed)) {
964                 /* Reading sk->sk_rxhash might incur an expensive cache line
965                  * miss.
966                  *
967                  * TCP_ESTABLISHED does cover almost all states where RFS
968                  * might be useful, and is cheaper [1] than testing :
969                  *      IPv4: inet_sk(sk)->inet_daddr
970                  *      IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
971                  * OR   an additional socket flag
972                  * [1] : sk_state and sk_prot are in the same cache line.
973                  */
974                 if (sk->sk_state == TCP_ESTABLISHED)
975                         sock_rps_record_flow_hash(sk->sk_rxhash);
976         }
977 #endif
978 }
979 
980 static inline void sock_rps_save_rxhash(struct sock *sk,
981                                         const struct sk_buff *skb)
982 {
983 #ifdef CONFIG_RPS
984         if (unlikely(sk->sk_rxhash != skb->hash))
985                 sk->sk_rxhash = skb->hash;
986 #endif
987 }
988 
989 static inline void sock_rps_reset_rxhash(struct sock *sk)
990 {
991 #ifdef CONFIG_RPS
992         sk->sk_rxhash = 0;
993 #endif
994 }
995 
996 #define sk_wait_event(__sk, __timeo, __condition, __wait)               \
997         ({      int __rc;                                               \
998                 release_sock(__sk);                                     \
999                 __rc = __condition;                                     \
1000                 if (!__rc) {                                            \
1001                         *(__timeo) = wait_woken(__wait,                 \
1002                                                 TASK_INTERRUPTIBLE,     \
1003                                                 *(__timeo));            \
1004                 }                                                       \
1005                 sched_annotate_sleep();                                 \
1006                 lock_sock(__sk);                                        \
1007                 __rc = __condition;                                     \
1008                 __rc;                                                   \
1009         })
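/* Editor's sketch of a typical caller (condensed from the shape of the
 * sk_stream_wait_*() helpers; the loop itself is illustrative): the macro
 * drops the socket lock, sleeps until woken or the timeout expires, then
 * retakes the lock and re-evaluates the condition.
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *      add_wait_queue(sk_sleep(sk), &wait);
 *      while (!sk_wait_event(sk, &timeo, sk_stream_memory_free(sk), &wait))
 *              if (!timeo)
 *                      break;                 // timed out
 *      remove_wait_queue(sk_sleep(sk), &wait);
 */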
1010 
1011 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1012 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1013 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1014 int sk_stream_error(struct sock *sk, int flags, int err);
1015 void sk_stream_kill_queues(struct sock *sk);
1016 void sk_set_memalloc(struct sock *sk);
1017 void sk_clear_memalloc(struct sock *sk);
1018 
1019 void __sk_flush_backlog(struct sock *sk);
1020 
1021 static inline bool sk_flush_backlog(struct sock *sk)
1022 {
1023         if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
1024                 __sk_flush_backlog(sk);
1025                 return true;
1026         }
1027         return false;
1028 }
1029 
1030 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
1031 
1032 struct request_sock_ops;
1033 struct timewait_sock_ops;
1034 struct inet_hashinfo;
1035 struct raw_hashinfo;
1036 struct smc_hashinfo;
1037 struct module;
1038 
1039 /*
1040  * caches using SLAB_TYPESAFE_BY_RCU should leave the .next pointer of nulls
1041  * nodes unmodified. Special care is taken when initializing the object to zero.
1042  */
1043 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1044 {
1045         if (offsetof(struct sock, sk_node.next) != 0)
1046                 memset(sk, 0, offsetof(struct sock, sk_node.next));
1047         memset(&sk->sk_node.pprev, 0,
1048                size - offsetof(struct sock, sk_node.pprev));
1049 }
1050 
1051 /* Networking protocol blocks we attach to sockets.
1052  * socket layer -> transport layer interface
1053  */
1054 struct proto {
1055         void                    (*close)(struct sock *sk,
1056                                         long timeout);
1057         int                     (*pre_connect)(struct sock *sk,
1058                                         struct sockaddr *uaddr,
1059                                         int addr_len);
1060         int                     (*connect)(struct sock *sk,
1061                                         struct sockaddr *uaddr,
1062                                         int addr_len);
1063         int                     (*disconnect)(struct sock *sk, int flags);
1064 
1065         struct sock *           (*accept)(struct sock *sk, int flags, int *err,
1066                                           bool kern);
1067 
1068         int                     (*ioctl)(struct sock *sk, int cmd,
1069                                          unsigned long arg);
1070         int                     (*init)(struct sock *sk);
1071         void                    (*destroy)(struct sock *sk);
1072         void                    (*shutdown)(struct sock *sk, int how);
1073         int                     (*setsockopt)(struct sock *sk, int level,
1074                                         int optname, char __user *optval,
1075                                         unsigned int optlen);
1076         int                     (*getsockopt)(struct sock *sk, int level,
1077                                         int optname, char __user *optval,
1078                                         int __user *option);
1079         void                    (*keepalive)(struct sock *sk, int valbool);
1080 #ifdef CONFIG_COMPAT
1081         int                     (*compat_setsockopt)(struct sock *sk,
1082                                         int level,
1083                                         int optname, char __user *optval,
1084                                         unsigned int optlen);
1085         int                     (*compat_getsockopt)(struct sock *sk,
1086                                         int level,
1087                                         int optname, char __user *optval,
1088                                         int __user *option);
1089         int                     (*compat_ioctl)(struct sock *sk,
1090                                         unsigned int cmd, unsigned long arg);
1091 #endif
1092         int                     (*sendmsg)(struct sock *sk, struct msghdr *msg,
1093                                            size_t len);
1094         int                     (*recvmsg)(struct sock *sk, struct msghdr *msg,
1095                                            size_t len, int noblock, int flags,
1096                                            int *addr_len);
1097         int                     (*sendpage)(struct sock *sk, struct page *page,
1098                                         int offset, size_t size, int flags);
1099         int                     (*bind)(struct sock *sk,
1100                                         struct sockaddr *uaddr, int addr_len);
1101 
1102         int                     (*backlog_rcv) (struct sock *sk,
1103                                                 struct sk_buff *skb);
1104 
1105         void            (*release_cb)(struct sock *sk);
1106 
1107         /* Keeping track of sk's, looking them up, and port selection methods. */
1108         int                     (*hash)(struct sock *sk);
1109         void                    (*unhash)(struct sock *sk);
1110         void                    (*rehash)(struct sock *sk);
1111         int                     (*get_port)(struct sock *sk, unsigned short snum);
1112 
1113         /* Keeping track of sockets in use */
1114 #ifdef CONFIG_PROC_FS
1115         unsigned int            inuse_idx;
1116 #endif
1117 
1118         bool                    (*stream_memory_free)(const struct sock *sk, int wake);
1119         bool                    (*stream_memory_read)(const struct sock *sk);
1120         /* Memory pressure */
1121         void                    (*enter_memory_pressure)(struct sock *sk);
1122         void                    (*leave_memory_pressure)(struct sock *sk);
1123         atomic_long_t           *memory_allocated;      /* Current allocated memory. */
1124         struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
1125         /*
1126          * Pressure flag: try to collapse.
1127          * Technical note: it is used by multiple contexts non atomically.
1128          * All the __sk_mem_schedule() is of this nature: accounting
1129          * is strict, actions are advisory and have some latency.
1130          */
1131         unsigned long           *memory_pressure;
1132         long                    *sysctl_mem;
1133 
1134         int                     *sysctl_wmem;
1135         int                     *sysctl_rmem;
1136         u32                     sysctl_wmem_offset;
1137         u32                     sysctl_rmem_offset;
1138 
1139         int                     max_header;
1140         bool                    no_autobind;
1141 
1142         struct kmem_cache       *slab;
1143         unsigned int            obj_size;
1144         slab_flags_t            slab_flags;
1145         unsigned int            useroffset;     /* Usercopy region offset */
1146         unsigned int            usersize;       /* Usercopy region size */
1147 
1148         struct percpu_counter   *orphan_count;
1149 
1150         struct request_sock_ops *rsk_prot;
1151         struct timewait_sock_ops *twsk_prot;
1152 
1153         union {
1154                 struct inet_hashinfo    *hashinfo;
1155                 struct udp_table        *udp_table;
1156                 struct raw_hashinfo     *raw_hash;
1157                 struct smc_hashinfo     *smc_hash;
1158         } h;
1159 
1160         struct module           *owner;
1161 
1162         char                    name[32];
1163 
1164         struct list_head        node;
1165 #ifdef SOCK_REFCNT_DEBUG
1166         atomic_t                socks;
1167 #endif
1168         int                     (*diag_destroy)(struct sock *sk, int err);
1169 } __randomize_layout;
1170 
1171 int proto_register(struct proto *prot, int alloc_slab);
1172 void proto_unregister(struct proto *prot);
1173 int sock_load_diag_module(int family, int protocol);
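/* Editor's sketch of registering a protocol block (struct my_sock and the
 * field values are illustrative; a real proto fills in many more hooks):
 * with alloc_slab != 0, proto_register() creates a per-protocol slab cache
 * sized by .obj_size.
 *
 *      static struct proto my_proto = {
 *              .name     = "MYPROTO",
 *              .owner    = THIS_MODULE,
 *              .obj_size = sizeof(struct my_sock),
 *      };
 *
 *      err = proto_register(&my_proto, 1);    // 1 => allocate slab cache
 *      ...
 *      proto_unregister(&my_proto);
 */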
1174 
1175 #ifdef SOCK_REFCNT_DEBUG
1176 static inline void sk_refcnt_debug_inc(struct sock *sk)
1177 {
1178         atomic_inc(&sk->sk_prot->socks);
1179 }
1180 
1181 static inline void sk_refcnt_debug_dec(struct sock *sk)
1182 {
1183         atomic_dec(&sk->sk_prot->socks);
1184         printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
1185                sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
1186 }
1187 
1188 static inline void sk_refcnt_debug_release(const struct sock *sk)
1189 {
1190         if (refcount_read(&sk->sk_refcnt) != 1)
1191                 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
1192                        sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
1193 }
1194 #else /* SOCK_REFCNT_DEBUG */
1195 #define sk_refcnt_debug_inc(sk) do { } while (0)
1196 #define sk_refcnt_debug_dec(sk) do { } while (0)
1197 #define sk_refcnt_debug_release(sk) do { } while (0)
1198 #endif /* SOCK_REFCNT_DEBUG */
1199 
1200 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1201 {
1202         if (sk->sk_wmem_queued >= sk->sk_sndbuf)
1203                 return false;
1204 
1205         return sk->sk_prot->stream_memory_free ?
1206                 sk->sk_prot->stream_memory_free(sk, wake) : true;
1207 }
1208 
1209 static inline bool sk_stream_memory_free(const struct sock *sk)
1210 {
1211         return __sk_stream_memory_free(sk, 0);
1212 }
1213 
1214 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
1215 {
1216         return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
1217                __sk_stream_memory_free(sk, wake);
1218 }
1219 
1220 static inline bool sk_stream_is_writeable(const struct sock *sk)
1221 {
1222         return __sk_stream_is_writeable(sk, 0);
1223 }
1224 
1225 static inline int sk_under_cgroup_hierarchy(struct sock *sk,
1226                                             struct cgroup *ancestor)
1227 {
1228 #ifdef CONFIG_SOCK_CGROUP_DATA
1229         return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
1230                                     ancestor);
1231 #else
1232         return -ENOTSUPP;
1233 #endif
1234 }
1235 
1236 static inline bool sk_has_memory_pressure(const struct sock *sk)
1237 {
1238         return sk->sk_prot->memory_pressure != NULL;
1239 }
1240 
1241 static inline bool sk_under_memory_pressure(const struct sock *sk)
1242 {
1243         if (!sk->sk_prot->memory_pressure)
1244                 return false;
1245 
1246         if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
1247             mem_cgroup_under_socket_pressure(sk->sk_memcg))
1248                 return true;
1249 
1250         return !!*sk->sk_prot->memory_pressure;
1251 }
1252 
1253 static inline long
1254 sk_memory_allocated(const struct sock *sk)
1255 {
1256         return atomic_long_read(sk->sk_prot->memory_allocated);
1257 }
1258 
1259 static inline long
1260 sk_memory_allocated_add(struct sock *sk, int amt)
1261 {
1262         return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
1263 }
1264 
1265 static inline void
1266 sk_memory_allocated_sub(struct sock *sk, int amt)
1267 {
1268         atomic_long_sub(amt, sk->sk_prot->memory_allocated);
1269 }
1270 
1271 static inline void sk_sockets_allocated_dec(struct sock *sk)
1272 {
1273         percpu_counter_dec(sk->sk_prot->sockets_allocated);
1274 }
1275 
1276 static inline void sk_sockets_allocated_inc(struct sock *sk)
1277 {
1278         percpu_counter_inc(sk->sk_prot->sockets_allocated);
1279 }
1280 
1281 static inline u64
1282 sk_sockets_allocated_read_positive(struct sock *sk)
1283 {
1284         return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
1285 }
1286 
1287 static inline int
1288 proto_sockets_allocated_sum_positive(struct proto *prot)
1289 {
1290         return percpu_counter_sum_positive(prot->sockets_allocated);
1291 }
1292 
1293 static inline long
1294 proto_memory_allocated(struct proto *prot)
1295 {
1296         return atomic_long_read(prot->memory_allocated);
1297 }
1298 
1299 static inline bool
1300 proto_memory_pressure(struct proto *prot)
1301 {
1302         if (!prot->memory_pressure)
1303                 return false;
1304         return !!*prot->memory_pressure;
1305 }
1306 
1307 
1308 #ifdef CONFIG_PROC_FS
1309 /* Called with local bh disabled */
1310 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1311 int sock_prot_inuse_get(struct net *net, struct proto *proto);
1312 int sock_inuse_get(struct net *net);
1313 #else
1314 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1315                 int inc)
1316 {
1317 }
1318 #endif
1319 
1320 
1321 /* With per-bucket locks this operation is not atomic, so this
1322  * version is no worse.
1323  */
1324 static inline int __sk_prot_rehash(struct sock *sk)
1325 {
1326         sk->sk_prot->unhash(sk);
1327         return sk->sk_prot->hash(sk);
1328 }
1329 
1330 /* About 10 seconds */
1331 #define SOCK_DESTROY_TIME (10*HZ)
1332 
1333 /* Sockets 0-1023 can't be bound to unless you are superuser */
1334 #define PROT_SOCK       1024
1335 
1336 #define SHUTDOWN_MASK   3
1337 #define RCV_SHUTDOWN    1
1338 #define SEND_SHUTDOWN   2
1339 
1340 #define SOCK_SNDBUF_LOCK        1
1341 #define SOCK_RCVBUF_LOCK        2
1342 #define SOCK_BINDADDR_LOCK      4
1343 #define SOCK_BINDPORT_LOCK      8
1344 
1345 struct socket_alloc {
1346         struct socket socket;
1347         struct inode vfs_inode;
1348 };
1349 
1350 static inline struct socket *SOCKET_I(struct inode *inode)
1351 {
1352         return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
1353 }
1354 
1355 static inline struct inode *SOCK_INODE(struct socket *socket)
1356 {
1357         return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
1358 }
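/* Editor's note (illustrative): the socket and its inode are allocated
 * together in one struct socket_alloc, so the two accessors are just
 * container_of() in opposite directions and are valid for sockfs inodes.
 *
 *      struct socket *sock = SOCKET_I(inode);      // inode  -> socket
 *      struct inode  *ino  = SOCK_INODE(sock);     // socket -> inode
 */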
1359 
1360 /*
1361  * Functions for memory accounting
1362  */
1363 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1364 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1365 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1366 void __sk_mem_reclaim(struct sock *sk, int amount);
1367 
1368 /* We used to have PAGE_SIZE here, but systems with 64KB pages
1369  * do not necessarily have 16 times more memory than 4KB ones.
1370  */
1371 #define SK_MEM_QUANTUM 4096
1372 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
1373 #define SK_MEM_SEND     0
1374 #define SK_MEM_RECV     1
1375 
1376 /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
1377 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
1378 {
1379         long val = sk->sk_prot->sysctl_mem[index];
1380 
1381 #if PAGE_SIZE > SK_MEM_QUANTUM
1382         val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
1383 #elif PAGE_SIZE < SK_MEM_QUANTUM
1384         val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
1385 #endif
1386         return val;
1387 }
1388 
1389 static inline int sk_mem_pages(int amt)
1390 {
1391         return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
1392 }
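
/*
 * Editor's note (illustrative sketch, not part of the original header):
 * with 64KB pages, PAGE_SHIFT is 16 and SK_MEM_QUANTUM_SHIFT is 12, so
 * sk_prot_mem_limits() scales each sysctl page by 1 << (16 - 12) = 16
 * quanta.  The hypothetical helper below repeats that conversion for an
 * arbitrary page count, mirroring the #if logic above.
 */
static inline long sk_example_pages_to_quanta(long pages)
{
#if PAGE_SIZE >= SK_MEM_QUANTUM
        return pages << (PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT);
#else
        return pages >> (SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT);
#endif
}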
1393 
1394 static inline bool sk_has_account(struct sock *sk)
1395 {
1396         /* return true if protocol supports memory accounting */
1397         return !!sk->sk_prot->memory_allocated;
1398 }
1399 
1400 static inline bool sk_wmem_schedule(struct sock *sk, int size)
1401 {
1402         if (!sk_has_account(sk))
1403                 return true;
1404         return size <= sk->sk_forward_alloc ||
1405                 __sk_mem_schedule(sk, size, SK_MEM_SEND);
1406 }
1407 
1408 static inline bool
1409 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1410 {
1411         if (!sk_has_account(sk))
1412                 return true;
1413         return size <= sk->sk_forward_alloc ||
1414                 __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
1415                 skb_pfmemalloc(skb);
1416 }
1417 
1418 static inline void sk_mem_reclaim(struct sock *sk)
1419 {
1420         if (!sk_has_account(sk))
1421                 return;
1422         if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
1423                 __sk_mem_reclaim(sk, sk->sk_forward_alloc);
1424 }
1425 
1426 static inline void sk_mem_reclaim_partial(struct sock *sk)
1427 {
1428         if (!sk_has_account(sk))
1429                 return;
1430         if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
1431                 __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
1432 }
1433 
1434 static inline void sk_mem_charge(struct sock *sk, int size)
1435 {
1436         if (!sk_has_account(sk))
1437                 return;
1438         sk->sk_forward_alloc -= size;
1439 }
1440 
1441 static inline void sk_mem_uncharge(struct sock *sk, int size)
1442 {
1443         if (!sk_has_account(sk))
1444                 return;
1445         sk->sk_forward_alloc += size;
1446 
1447         /* Avoid a possible overflow.
1448          * TCP send queues can make this happen, if sk_mem_reclaim()
1449          * is not called and more than 2 GBytes are released at once.
1450          *
1451          * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
1452          * no need to hold that much forward allocation anyway.
1453          */
1454         if (unlikely(sk->sk_forward_alloc >= 1 << 21))
1455                 __sk_mem_reclaim(sk, 1 << 20);
1456 }
1457 
1458 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
1459 {
1460         sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1461         sk->sk_wmem_queued -= skb->truesize;
1462         sk_mem_uncharge(sk, skb->truesize);
1463         __kfree_skb(skb);
1464 }
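
/*
 * Editor's sketch (hypothetical helper, not part of the original header):
 * a typical send path first reserves forward allocation with
 * sk_wmem_schedule() and only then accounts the buffer with
 * sk_mem_charge(), i.e. the inverse of what sk_wmem_free_skb() undoes above.
 */
static inline bool sk_example_account_tx_skb(struct sock *sk, struct sk_buff *skb)
{
        if (!sk_wmem_schedule(sk, skb->truesize))
                return false;           /* over memory limits, caller should drop */
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);
        return true;
}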
1465 
1466 static inline void sock_release_ownership(struct sock *sk)
1467 {
1468         if (sk->sk_lock.owned) {
1469                 sk->sk_lock.owned = 0;
1470 
1471                 /* The sk_lock has mutex_unlock() semantics: */
1472                 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
1473         }
1474 }
1475 
1476 /*
1477  * Macro so as to not evaluate some arguments when
1478  * lockdep is not enabled.
1479  *
1480  * Mark both the sk_lock and the sk_lock.slock as a
1481  * per-address-family lock class.
1482  */
1483 #define sock_lock_init_class_and_name(sk, sname, skey, name, key)       \
1484 do {                                                                    \
1485         sk->sk_lock.owned = 0;                                          \
1486         init_waitqueue_head(&sk->sk_lock.wq);                           \
1487         spin_lock_init(&(sk)->sk_lock.slock);                           \
1488         debug_check_no_locks_freed((void *)&(sk)->sk_lock,              \
1489                         sizeof((sk)->sk_lock));                         \
1490         lockdep_set_class_and_name(&(sk)->sk_lock.slock,                \
1491                                 (skey), (sname));                               \
1492         lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
1493 } while (0)
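
/*
 * Editor's sketch (hypothetical family and key names): an address family
 * typically applies its own lockdep class right after sock_init_data(),
 * using static keys so that every socket of that family shares the class.
 */
static inline void example_sock_lock_init(struct sock *sk)
{
        static struct lock_class_key ex_slock_key;      /* placeholder key */
        static struct lock_class_key ex_lock_key;       /* placeholder key */

        sock_lock_init_class_and_name(sk,
                                      "slock-AF_EXAMPLE", &ex_slock_key,
                                      "sk_lock-AF_EXAMPLE", &ex_lock_key);
}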
1494 
1495 #ifdef CONFIG_LOCKDEP
1496 static inline bool lockdep_sock_is_held(const struct sock *sk)
1497 {
1498         return lockdep_is_held(&sk->sk_lock) ||
1499                lockdep_is_held(&sk->sk_lock.slock);
1500 }
1501 #endif
1502 
1503 void lock_sock_nested(struct sock *sk, int subclass);
1504 
1505 static inline void lock_sock(struct sock *sk)
1506 {
1507         lock_sock_nested(sk, 0);
1508 }
1509 
1510 void __release_sock(struct sock *sk);
1511 void release_sock(struct sock *sk);
1512 
1513 /* BH context may only use the following locking interface. */
1514 #define bh_lock_sock(__sk)      spin_lock(&((__sk)->sk_lock.slock))
1515 #define bh_lock_sock_nested(__sk) \
1516                                 spin_lock_nested(&((__sk)->sk_lock.slock), \
1517                                 SINGLE_DEPTH_NESTING)
1518 #define bh_unlock_sock(__sk)    spin_unlock(&((__sk)->sk_lock.slock))
1519 
1520 bool lock_sock_fast(struct sock *sk);
1521 /**
1522  * unlock_sock_fast - complement of lock_sock_fast
1523  * @sk: socket
1524  * @slow: slow mode
1525  *
1526  * Fast unlock of the socket, for user context.
1527  * If slow mode is on, we call the regular release_sock().
1528  */
1529 static inline void unlock_sock_fast(struct sock *sk, bool slow)
1530 {
1531         if (slow)
1532                 release_sock(sk);
1533         else
1534                 spin_unlock_bh(&sk->sk_lock.slock);
1535 }
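
/*
 * Editor's sketch (hypothetical helper): lock_sock_fast() returns true when
 * it had to fall back to the full (slow) socket lock, and the caller must
 * feed that value back to unlock_sock_fast().
 */
static inline void example_fast_locked_update(struct sock *sk)
{
        bool slow = lock_sock_fast(sk);

        /* ... touch state that BH handlers might also modify ... */

        unlock_sock_fast(sk, slow);
}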
1536 
1537 /* Used by processes to "lock" a socket state, so that
1538  * interrupts and bottom half handlers won't change it
1539  * from under us. It essentially blocks any incoming
1540  * packets, so that we won't get any new data or any
1541  * packets that change the state of the socket.
1542  *
1543  * While locked, BH processing will add new packets to
1544  * the backlog queue.  This queue is processed by the
1545  * owner of the socket lock right before it is released.
1546  *
1547  * Since ~2.3.5 it has also been an exclusive sleep lock serializing
1548  * accesses from user process context.
1549  */
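
/*
 * Editor's sketch of the process-context pattern described above
 * (hypothetical helper): while the lock is owned, incoming packets are
 * queued on the backlog, and release_sock() processes that backlog.
 */
static inline void example_locked_state_change(struct sock *sk)
{
        lock_sock(sk);
        /* socket state is stable here; modify it as needed */
        release_sock(sk);
}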
1550 
1551 static inline void sock_owned_by_me(const struct sock *sk)
1552 {
1553 #ifdef CONFIG_LOCKDEP
1554         WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
1555 #endif
1556 }
1557 
1558 static inline bool sock_owned_by_user(const struct sock *sk)
1559 {
1560         sock_owned_by_me(sk);
1561         return sk->sk_lock.owned;
1562 }
1563 
1564 static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1565 {
1566         return sk->sk_lock.owned;
1567 }
1568 
1569 /* no reclassification while locks are held */
1570 static inline bool sock_allow_reclassification(const struct sock *csk)
1571 {
1572         struct sock *sk = (struct sock *)csk;
1573 
1574         return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
1575 }
1576 
1577 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1578                       struct proto *prot, int kern);
1579 void sk_free(struct sock *sk);
1580 void sk_destruct(struct sock *sk);
1581 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1582 void sk_free_unlock_clone(struct sock *sk);
1583 
1584 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1585                              gfp_t priority);
1586 void __sock_wfree(struct sk_buff *skb);
1587 void sock_wfree(struct sk_buff *skb);
1588 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1589                              gfp_t priority);
1590 void skb_orphan_partial(struct sk_buff *skb);
1591 void sock_rfree(struct sk_buff *skb);
1592 void sock_efree(struct sk_buff *skb);
1593 #ifdef CONFIG_INET
1594 void sock_edemux(struct sk_buff *skb);
1595 #else
1596 #define sock_edemux sock_efree
1597 #endif
1598 
1599 int sock_setsockopt(struct socket *sock, int level, int op,
1600                     char __user *optval, unsigned int optlen);
1601 
1602 int sock_getsockopt(struct socket *sock, int level, int op,
1603                     char __user *optval, int __user *optlen);
1604 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1605                                     int noblock, int *errcode);
1606 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1607                                      unsigned long data_len, int noblock,
1608                                      int *errcode, int max_page_order);
1609 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1610 void sock_kfree_s(struct sock *sk, void *mem, int size);
1611 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1612 void sk_send_sigurg(struct sock *sk);
1613 
1614 struct sockcm_cookie {
1615         u64 transmit_time;
1616         u32 mark;
1617         u16 tsflags;
1618 };
1619 
1620 static inline void sockcm_init(struct sockcm_cookie *sockc,
1621                                const struct sock *sk)
1622 {
1623         *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
1624 }
1625 
1626 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1627                      struct sockcm_cookie *sockc);
1628 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1629                    struct sockcm_cookie *sockc);
1630 
1631 /*
1632  * Functions to fill in entries in struct proto_ops when a protocol
1633  * does not implement a particular function.
1634  */
1635 int sock_no_bind(struct socket *, struct sockaddr *, int);
1636 int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1637 int sock_no_socketpair(struct socket *, struct socket *);
1638 int sock_no_accept(struct socket *, struct socket *, int, bool);
1639 int sock_no_getname(struct socket *, struct sockaddr *, int);
1640 int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
1641 int sock_no_listen(struct socket *, int);
1642 int sock_no_shutdown(struct socket *, int);
1643 int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
1644 int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
1645 int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
1646 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1647 int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
1648 int sock_no_mmap(struct file *file, struct socket *sock,
1649                  struct vm_area_struct *vma);
1650 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
1651                          size_t size, int flags);
1652 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
1653                                 int offset, size_t size, int flags);
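
/*
 * Editor's sketch (hypothetical protocol; assumes struct proto_ops from
 * linux/net.h): operations a protocol does not implement can simply point
 * at the matching sock_no_* stub.  A real protocol still supplies its own
 * release, poll, sendmsg and recvmsg as appropriate.
 */
static const struct proto_ops example_proto_ops = {
        .family         = PF_UNSPEC,            /* placeholder family */
        .bind           = sock_no_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
        .sendmsg        = sock_no_sendmsg,
        .recvmsg        = sock_no_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};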
1654 
1655 /*
1656  * Functions to fill in entries in struct proto_ops when a protocol
1657  * uses the inet style.
1658  */
1659 int sock_common_getsockopt(struct socket *sock, int level, int optname,
1660                                   char __user *optval, int __user *optlen);
1661 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1662                         int flags);
1663 int sock_common_setsockopt(struct socket *sock, int level, int optname,
1664                                   char __user *optval, unsigned int optlen);
1665 int compat_sock_common_getsockopt(struct socket *sock, int level,
1666                 int optname, char __user *optval, int __user *optlen);
1667 int compat_sock_common_setsockopt(struct socket *sock, int level,
1668                 int optname, char __user *optval, unsigned int optlen);
1669 
1670 void sk_common_release(struct sock *sk);
1671 
1672 /*
1673  *      Default socket callbacks and setup code
1674  */
1675 
1676 /* Initialise core socket variables */
1677 void sock_init_data(struct socket *sock, struct sock *sk);
1678 
1679 /*
1680  * Socket reference counting postulates.
1681  *
1682  * * Each user of the socket SHOULD hold a reference count.
1683  * * Each access point to the socket (a hash table bucket, a reference from a
1684  *   list, a running timer, an skb in flight) MUST hold a reference count.
1685  * * When the reference count hits 0, it will never increase again.
1686  * * When the reference count hits 0, it means that no references from
1687  *   outside exist to this socket and the current process on the current CPU
1688  *   is the last user and may/should destroy this socket.
1689  * * sk_free is called from any context: process, BH, IRQ. When
1690  *   it is called, the socket has no references from outside -> sk_free
1691  *   may release descendant resources allocated by the socket, but
1692  *   by the time it is called, the socket is NOT referenced by any
1693  *   hash tables, lists etc.
1694  * * Packets, delivered from outside (from network or from another process)
1695  *   and enqueued on receive/error queues SHOULD NOT grab a reference count
1696  *   while they sit in a queue. Otherwise, packets will leak when the
1697  *   socket is looked up by one CPU and unhashing is done by another CPU.
1698  *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
1699  *   (leak to backlog). Packet sockets do all the processing inside
1700  *   BR_NETPROTO_LOCK, so they do not have this race condition. UNIX sockets
1701  *   use a separate SMP lock, so they are prone to it as well.
1702  */
1703 
1704 /* Ungrab socket and destroy it, if it was the last reference. */
1705 static inline void sock_put(struct sock *sk)
1706 {
1707         if (refcount_dec_and_test(&sk->sk_refcnt))
1708                 sk_free(sk);
1709 }
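
/*
 * Editor's sketch (hypothetical helpers): the postulates above boil down to
 * a hold/put pair around every asynchronous reference.  sock_hold() is
 * defined earlier in this header.
 */
static inline void example_take_async_ref(struct sock *sk)
{
        sock_hold(sk);          /* reference for a pending timer/work item */
}

static inline void example_drop_async_ref(struct sock *sk)
{
        sock_put(sk);           /* the last put frees the socket via sk_free() */
}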
1710 /* Generic version of sock_put(), dealing with all sockets
1711  * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
1712  */
1713 void sock_gen_put(struct sock *sk);
1714 
1715 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1716                      unsigned int trim_cap, bool refcounted);
1717 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1718                                  const int nested)
1719 {
1720         return __sk_receive_skb(sk, skb, nested, 1, true);
1721 }
1722 
1723 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1724 {
1725         /* sk_tx_queue_mapping accepts only up to a 16-bit value */
1726         if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
1727                 return;
1728         sk->sk_tx_queue_mapping = tx_queue;
1729 }
1730 
1731 #define NO_QUEUE_MAPPING        USHRT_MAX
1732 
1733 static inline void sk_tx_queue_clear(struct sock *sk)
1734 {
1735         sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
1736 }
1737 
1738 static inline int sk_tx_queue_get(const struct sock *sk)
1739 {
1740         if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
1741                 return sk->sk_tx_queue_mapping;
1742 
1743         return -1;
1744 }
1745 
1746 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
1747 {
1748 #ifdef CONFIG_XPS
1749         if (skb_rx_queue_recorded(skb)) {
1750                 u16 rx_queue = skb_get_rx_queue(skb);
1751 
1752                 if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
1753                         return;
1754 
1755                 sk->sk_rx_queue_mapping = rx_queue;
1756         }
1757 #endif
1758 }
1759 
1760 static inline void sk_rx_queue_clear(struct sock *sk)
1761 {
1762 #ifdef CONFIG_XPS
1763         sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
1764 #endif
1765 }
1766 
1767 #ifdef CONFIG_XPS
1768 static inline int sk_rx_queue_get(const struct sock *sk)
1769 {
1770         if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
1771                 return sk->sk_rx_queue_mapping;
1772 
1773         return -1;
1774 }
1775 #endif
1776 
1777 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1778 {
1779         sk_tx_queue_clear(sk);
1780         sk->sk_socket = sock;
1781 }
1782 
1783 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1784 {
1785         BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
1786         return &rcu_dereference_raw(sk->sk_wq)->wait;
1787 }
1788 /* Detach socket from process context.
1789  * Announce socket dead, detach it from wait queue and inode.
1790  * Note that the parent inode holds a reference count on this struct sock;
1791  * we do not release it in this function, because the protocol
1792  * probably wants some additional cleanups or even to continue
1793  * working with this socket (TCP).
1794  */
1795 static inline void sock_orphan(struct sock *sk)
1796 {
1797         write_lock_bh(&sk->sk_callback_lock);
1798         sock_set_flag(sk, SOCK_DEAD);
1799         sk_set_socket(sk, NULL);
1800         sk->sk_wq  = NULL;
1801         write_unlock_bh(&sk->sk_callback_lock);
1802 }
1803 
1804 static inline void sock_graft(struct sock *sk, struct socket *parent)
1805 {
1806         WARN_ON(parent->sk);
1807         write_lock_bh(&sk->sk_callback_lock);
1808         rcu_assign_pointer(sk->sk_wq, parent->wq);
1809         parent->sk = sk;
1810         sk_set_socket(sk, parent);
1811         sk->sk_uid = SOCK_INODE(parent)->i_uid;
1812         security_sock_graft(sk, parent);
1813         write_unlock_bh(&sk->sk_callback_lock);
1814 }
1815 
1816 kuid_t sock_i_uid(struct sock *sk);
1817 unsigned long sock_i_ino(struct sock *sk);
1818 
1819 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
1820 {
1821         return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
1822 }
1823 
1824 static inline u32 net_tx_rndhash(void)
1825 {
1826         u32 v = prandom_u32();
1827 
1828         return v ?: 1;
1829 }
1830 
1831 static inline void sk_set_txhash(struct sock *sk)
1832 {
1833         sk->sk_txhash = net_tx_rndhash();
1834 }
1835 
1836 static inline void sk_rethink_txhash(struct sock *sk)
1837 {
1838         if (sk->sk_txhash)
1839                 sk_set_txhash(sk);
1840 }
1841 
1842 static inline struct dst_entry *
1843 __sk_dst_get(struct sock *sk)
1844 {
1845         return rcu_dereference_check(sk->sk_dst_cache,
1846                                      lockdep_sock_is_held(sk));
1847 }
1848 
1849 static inline struct dst_entry *
1850 sk_dst_get(struct sock *sk)
1851 {
1852         struct dst_entry *dst;
1853 
1854         rcu_read_lock();
1855         dst = rcu_dereference(sk->sk_dst_cache);
1856         if (dst && !atomic_inc_not_zero(&dst->__refcnt))
1857                 dst = NULL;
1858         rcu_read_unlock();
1859         return dst;
1860 }
1861 
1862 static inline void dst_negative_advice(struct sock *sk)
1863 {
1864         struct dst_entry *ndst, *dst = __sk_dst_get(sk);
1865 
1866         sk_rethink_txhash(sk);
1867 
1868         if (dst && dst->ops->negative_advice) {
1869                 ndst = dst->ops->negative_advice(dst);
1870 
1871                 if (ndst != dst) {
1872                         rcu_assign_pointer(sk->sk_dst_cache, ndst);
1873                         sk_tx_queue_clear(sk);
1874                         sk->sk_dst_pending_confirm = 0;
1875                 }
1876         }
1877 }
1878 
1879 static inline void
1880 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1881 {
1882         struct dst_entry *old_dst;
1883 
1884         sk_tx_queue_clear(sk);
1885         sk->sk_dst_pending_confirm = 0;
1886         old_dst = rcu_dereference_protected(sk->sk_dst_cache,
1887                                             lockdep_sock_is_held(sk));
1888         rcu_assign_pointer(sk->sk_dst_cache, dst);
1889         dst_release(old_dst);
1890 }
1891 
1892 static inline void
1893 sk_dst_set(struct sock *sk, struct dst_entry *dst)
1894 {
1895         struct dst_entry *old_dst;
1896 
1897         sk_tx_queue_clear(sk);
1898         sk->sk_dst_pending_confirm = 0;
1899         old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
1900         dst_release(old_dst);
1901 }
1902 
1903 static inline void
1904 __sk_dst_reset(struct sock *sk)
1905 {
1906         __sk_dst_set(sk, NULL);
1907 }
1908 
1909 static inline void
1910 sk_dst_reset(struct sock *sk)
1911 {
1912         sk_dst_set(sk, NULL);
1913 }
1914 
1915 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1916 
1917 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
1918 
1919 static inline void sk_dst_confirm(struct sock *sk)
1920 {
1921         if (!sk->sk_dst_pending_confirm)
1922                 sk->sk_dst_pending_confirm = 1;
1923 }
1924 
1925 static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
1926 {
1927         if (skb_get_dst_pending_confirm(skb)) {
1928                 struct sock *sk = skb->sk;
1929                 unsigned long now = jiffies;
1930 
1931                 /* avoid dirtying neighbour */
1932                 if (n->confirmed != now)
1933                         n->confirmed = now;
1934                 if (sk && sk->sk_dst_pending_confirm)
1935                         sk->sk_dst_pending_confirm = 0;
1936         }
1937 }
1938 
1939 bool sk_mc_loop(struct sock *sk);
1940 
1941 static inline bool sk_can_gso(const struct sock *sk)
1942 {
1943         return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
1944 }
1945 
1946 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1947 
1948 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
1949 {
1950         sk->sk_route_nocaps |= flags;
1951         sk->sk_route_caps &= ~flags;
1952 }
1953 
1954 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
1955                                            struct iov_iter *from, char *to,
1956                                            int copy, int offset)
1957 {
1958         if (skb->ip_summed == CHECKSUM_NONE) {
1959                 __wsum csum = 0;
1960                 if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
1961                         return -EFAULT;
1962                 skb->csum = csum_block_add(skb->csum, csum, offset);
1963         } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1964                 if (!copy_from_iter_full_nocache(to, copy, from))
1965                         return -EFAULT;
1966         } else if (!copy_from_iter_full(to, copy, from))
1967                 return -EFAULT;
1968 
1969         return 0;
1970 }
1971 
1972 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
1973                                        struct iov_iter *from, int copy)
1974 {
1975         int err, offset = skb->len;
1976 
1977         err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
1978                                        copy, offset);
1979         if (err)
1980                 __skb_trim(skb, offset);
1981 
1982         return err;
1983 }
1984 
1985 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
1986                                            struct sk_buff *skb,
1987                                            struct page *page,
1988                                            int off, int copy)
1989 {
1990         int err;
1991 
1992         err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
1993                                        copy, skb->len);
1994         if (err)
1995                 return err;
1996 
1997         skb->len             += copy;
1998         skb->data_len        += copy;
1999         skb->truesize        += copy;
2000         sk->sk_wmem_queued   += copy;
2001         sk_mem_charge(sk, copy);
2002         return 0;
2003 }
2004 
2005 /**
2006  * sk_wmem_alloc_get - returns write allocations
2007  * @sk: socket
2008  *
2009  * Returns sk_wmem_alloc minus initial offset of one
2010  */
2011 static inline int sk_wmem_alloc_get(const struct sock *sk)
2012 {
2013         return refcount_read(&sk->sk_wmem_alloc) - 1;
2014 }
2015 
2016 /**
2017  * sk_rmem_alloc_get - returns read allocations
2018  * @sk: socket
2019  *
2020  * Returns sk_rmem_alloc
2021  */
2022 static inline int sk_rmem_alloc_get(const struct sock *sk)
2023 {
2024         return atomic_read(&sk->sk_rmem_alloc);
2025 }
2026 
2027 /**
2028  * sk_has_allocations - check if allocations are outstanding
2029  * @sk: socket
2030  *
2031  * Returns true if socket has write or read allocations
2032  */
2033 static inline bool sk_has_allocations(const struct sock *sk)
2034 {
2035         return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
2036 }
2037 
2038 /**
2039  * skwq_has_sleeper - check if there are any waiting processes
2040  * @wq: struct socket_wq
2041  *
2042  * Returns true if socket_wq has waiting processes
2043  *
2044  * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
2045  * barrier call. They were added due to the race found within the tcp code.
2046  *
2047  * Consider the following tcp code paths::
2048  *
2049  *   CPU1                CPU2
2050  *   sys_select          receive packet
2051  *   ...                 ...
2052  *   __add_wait_queue    update tp->rcv_nxt
2053  *   ...                 ...
2054  *   tp->rcv_nxt check   sock_def_readable
2055  *   ...                 {
2056  *   schedule               rcu_read_lock();
2057  *                          wq = rcu_dereference(sk->sk_wq);
2058  *                          if (wq && waitqueue_active(&wq->wait))
2059  *                              wake_up_interruptible(&wq->wait)
2060  *                          ...
2061  *                       }
2062  *
2063  * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
2064  * in its cache, and so does the tp->rcv_nxt update on CPU2 side.  CPU1
2065  * could then end up calling schedule and sleeping forever if there is no
2066  * more data on the socket.
2067  *
2068  */
2069 static inline bool skwq_has_sleeper(struct socket_wq *wq)
2070 {
2071         return wq && wq_has_sleeper(&wq->wait);
2072 }
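
/*
 * Editor's sketch mirroring the CPU2 column above (hypothetical callback
 * name): the waker dereferences sk->sk_wq under RCU and only wakes waiters
 * that skwq_has_sleeper() actually observes, relying on the paired barriers.
 */
static inline void example_data_ready(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible(&wq->wait);
        rcu_read_unlock();
}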
2073 
2074 /**
2075  * sock_poll_wait - place memory barrier behind the poll_wait call.
2076  * @filp:           file
2077  * @sock:           socket to wait on
2078  * @p:              poll_table
2079  *
2080  * See the comments in the wq_has_sleeper function.
2081  *
2082  * Do not derive sock from filp->private_data here. An SMC socket establishes
2083  * an internal TCP socket that is used in the fallback case. All socket
2084  * operations on the SMC socket are then forwarded to the TCP socket. In case of
2085  * poll, the filp->private_data pointer references the SMC socket because the
2086  * TCP socket has no file assigned.
2087  */
2088 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
2089                                   poll_table *p)
2090 {
2091         if (!poll_does_not_wait(p)) {
2092                 poll_wait(filp, &sock->wq->wait, p);
2093                 /* We need to be sure we are in sync with the
2094                  * socket flags modification.
2095                  *
2096                  * This memory barrier is paired with the one in wq_has_sleeper.
2097                  */
2098                 smp_mb();
2099         }
2100 }
2101 
2102 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
2103 {
2104         if (sk->sk_txhash) {
2105                 skb->l4_hash = 1;
2106                 skb->hash = sk->sk_txhash;
2107         }
2108 }
2109 
2110 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2111 
2112 /*
2113  *      Queue a received datagram if it will fit. Stream and sequenced
2114  *      protocols can't normally use this as they need to fit buffers in
2115  *      and play with them.
2116  *
2117  *      Inlined as it's very short and called for pretty much every
2118  *      packet ever received.
2119  */
2120 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2121 {
2122         skb_orphan(skb);
2123         skb->sk = sk;
2124         skb->destructor = sock_rfree;
2125         atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2126         sk_mem_charge(sk, skb->truesize);
2127 }
2128 
2129 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2130                     unsigned long expires);
2131 
2132 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2133 
2134 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2135                         struct sk_buff *skb, unsigned int flags,
2136                         void (*destructor)(struct sock *sk,
2137                                            struct sk_buff *skb));
2138 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2139 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2140 
2141 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2142 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2143 
2144 /*
2145  *      Recover an error report and clear atomically
2146  */
2147 
2148 static inline int sock_error(struct sock *sk)
2149 {
2150         int err;
2151         if (likely(!sk->sk_err))
2152                 return 0;
2153         err = xchg(&sk->sk_err, 0);
2154         return -err;
2155 }
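
/*
 * Editor's sketch (hypothetical helper): receive paths usually convert a
 * pending error exactly once into the syscall return value.
 */
static inline int example_report_pending_error(struct sock *sk)
{
        int err = sock_error(sk);

        if (err)
                return err;     /* already negative, e.g. -ECONNRESET */
        return 0;               /* continue with the normal receive path */
}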
2156 
2157 static inline unsigned long sock_wspace(struct sock *sk)
2158 {
2159         int amt = 0;
2160 
2161         if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
2162                 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
2163                 if (amt < 0)
2164                         amt = 0;
2165         }
2166         return amt;
2167 }
2168 
2169 /* Note:
2170  *  We use sk->sk_wq_raw from contexts where we know this
2171  *  pointer is not NULL and cannot disappear/change.
2172  */
2173 static inline void sk_set_bit(int nr, struct sock *sk)
2174 {
2175         if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2176             !sock_flag(sk, SOCK_FASYNC))
2177                 return;
2178 
2179         set_bit(nr, &sk->sk_wq_raw->flags);
2180 }
2181 
2182 static inline void sk_clear_bit(int nr, struct sock *sk)
2183 {
2184         if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2185             !sock_flag(sk, SOCK_FASYNC))
2186                 return;
2187 
2188         clear_bit(nr, &sk->sk_wq_raw->flags);
2189 }
2190 
2191 static inline void sk_wake_async(const struct sock *sk, int how, int band)
2192 {
2193         if (sock_flag(sk, SOCK_FASYNC)) {
2194                 rcu_read_lock();
2195                 sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
2196                 rcu_read_unlock();
2197         }
2198 }
2199 
2200 /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
2201  * need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
2202  * Note: for send buffers, TCP works better if we can build two skbs at
2203  * minimum.
2204  */
2205 #define TCP_SKB_MIN_TRUESIZE    (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
2206 
2207 #define SOCK_MIN_SNDBUF         (TCP_SKB_MIN_TRUESIZE * 2)
2208 #define SOCK_MIN_RCVBUF          TCP_SKB_MIN_TRUESIZE
2209 
2210 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2211 {
2212         if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
2213                 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2214                 sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
2215         }
2216 }
2217 
2218 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2219                                     bool force_schedule);
2220 
2221 /**
2222  * sk_page_frag - return an appropriate page_frag
2223  * @sk: socket
2224  *
2225  * If the socket allocation mode allows the current thread to sleep, it is
2226  * safe to use the per-task page_frag instead of the per-socket one.
2227  */
2228 static inline struct page_frag *sk_page_frag(struct sock *sk)
2229 {
2230         if (gfpflags_allow_blocking(sk->sk_allocation))
2231                 return &current->task_frag;
2232 
2233         return &sk->sk_frag;
2234 }
2235 
2236 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
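
/*
 * Editor's sketch (hypothetical helper): a sendmsg implementation picks the
 * appropriate frag with sk_page_frag() and tops it up with
 * sk_page_frag_refill() before copying user data into it.
 */
static inline struct page_frag *example_get_tx_frag(struct sock *sk)
{
        struct page_frag *pfrag = sk_page_frag(sk);

        if (!sk_page_frag_refill(sk, pfrag))
                return NULL;    /* caller should wait for memory */
        return pfrag;
}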
2237 
2238 /*
2239  *      Default write policy as shown to user space via poll/select/SIGIO
2240  */
2241 static inline bool sock_writeable(const struct sock *sk)
2242 {
2243         return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
2244 }
2245 
2246 static inline gfp_t gfp_any(void)
2247 {
2248         return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
2249 }
2250 
2251 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
2252 {
2253         return noblock ? 0 : sk->sk_rcvtimeo;
2254 }
2255 
2256 static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
2257 {
2258         return noblock ? 0 : sk->sk_sndtimeo;
2259 }
2260 
2261 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
2262 {
2263         return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
2264 }
2265 
2266 /* Alas, with timeout socket operations are not restartable.
2267  * Compare this to poll().
2268  */
2269 static inline int sock_intr_errno(long timeo)
2270 {
2271         return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
2272 }
2273 
2274 struct sock_skb_cb {
2275         u32 dropcount;
2276 };
2277 
2278 /* Store sock_skb_cb at the end of skb->cb[] so protocol families
2279  * using skb->cb[] would keep using it directly and utilize its
2280  * alignment guarantee.
2281  */
2282 #define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
2283                             sizeof(struct sock_skb_cb)))
2284 
2285 #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
2286                             SOCK_SKB_CB_OFFSET))
2287 
2288 #define sock_skb_cb_check_size(size) \
2289         BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
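
/*
 * Editor's sketch (hypothetical control block): a protocol layering its own
 * data in skb->cb[] can assert at build time that it leaves room for the
 * trailing struct sock_skb_cb.
 */
struct example_skb_cb {
        u32 seq;                /* placeholder protocol fields */
        u32 flags;
};

static inline void example_skb_cb_check(void)
{
        sock_skb_cb_check_size(sizeof(struct example_skb_cb));
}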
2290 
2291 static inline void
2292 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
2293 {
2294         SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
2295                                                 atomic_read(&sk->sk_drops) : 0;
2296 }
2297 
2298 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
2299 {
2300         int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2301 
2302         atomic_add(segs, &sk->sk_drops);
2303 }
2304 
2305 static inline ktime_t sock_read_timestamp(struct sock *sk)
2306 {
2307 #if BITS_PER_LONG==32
2308         unsigned int seq;
2309         ktime_t kt;
2310 
2311         do {
2312                 seq = read_seqbegin(&sk->sk_stamp_seq);
2313                 kt = sk->sk_stamp;
2314         } while (read_seqretry(&sk->sk_stamp_seq, seq));
2315 
2316         return kt;
2317 #else
2318         return sk->sk_stamp;
2319 #endif
2320 }
2321 
2322 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2323 {
2324 #if BITS_PER_LONG==32
2325         write_seqlock(&sk->sk_stamp_seq);
2326         sk->sk_stamp = kt;
2327         write_sequnlock(&sk->sk_stamp_seq);
2328 #else
2329         sk->sk_stamp = kt;
2330 #endif
2331 }
2332 
2333 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2334                            struct sk_buff *skb);
2335 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2336                              struct sk_buff *skb);
2337 
2338 static inline void
2339 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2340 {
2341         ktime_t kt = skb->tstamp;
2342         struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
2343 
2344         /*
2345          * generate control messages if
2346          * - receive time stamping in software requested
2347          * - software time stamp available and wanted
2348          * - hardware time stamps available and wanted
2349          */
2350         if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2351             (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2352             (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2353             (hwtstamps->hwtstamp &&
2354              (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2355                 __sock_recv_timestamp(msg, sk, skb);
2356         else
2357                 sock_write_timestamp(sk, kt);
2358 
2359         if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2360                 __sock_recv_wifi_status(msg, sk, skb);
2361 }
2362 
2363 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2364                               struct sk_buff *skb);
2365 
2366 #define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
2367 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2368                                           struct sk_buff *skb)
2369 {
2370 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)                       | \
2371                            (1UL << SOCK_RCVTSTAMP))
2372 #define TSFLAGS_ANY       (SOF_TIMESTAMPING_SOFTWARE                    | \
2373                            SOF_TIMESTAMPING_RAW_HARDWARE)
2374 
2375         if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
2376                 __sock_recv_ts_and_drops(msg, sk, skb);
2377         else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
2378                 sock_write_timestamp(sk, skb->tstamp);
2379         else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
2380                 sock_write_timestamp(sk, 0);
2381 }
2382 
2383 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
2384 
2385 /**
2386  * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
2387  * @sk:         socket sending this packet
2388  * @tsflags:    timestamping flags to use
2389  * @tx_flags:   completed with instructions for time stamping
2390  * @tskey:      filled in with next sk_tskey (not for TCP, which uses seqno)
2391  *
2392  * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
2393  */
2394 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2395                                       __u8 *tx_flags, __u32 *tskey)
2396 {
2397         if (unlikely(tsflags)) {
2398                 __sock_tx_timestamp(tsflags, tx_flags);
2399                 if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
2400                     tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
2401                         *tskey = sk->sk_tskey++;
2402         }
2403         if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2404                 *tx_flags |= SKBTX_WIFI_STATUS;
2405 }
2406 
2407 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2408                                      __u8 *tx_flags)
2409 {
2410         _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
2411 }
2412 
2413 static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
2414 {
2415         _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2416                            &skb_shinfo(skb)->tskey);
2417 }
2418 
2419 /**
2420  * sk_eat_skb - Release a skb if it is no longer needed
2421  * @sk: socket to eat this skb from
2422  * @skb: socket buffer to eat
2423  *
2424  * This routine must be called with interrupts disabled or with the socket
2425  * locked so that the sk_buff queue operation is ok.
2426 */
2427 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2428 {
2429         __skb_unlink(skb, &sk->sk_receive_queue);
2430         __kfree_skb(skb);
2431 }
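
/*
 * Editor's sketch (hypothetical helper): with the socket locked as required
 * above, a receive loop can consume the head of sk_receive_queue and free
 * it in one step with sk_eat_skb().
 */
static inline void example_consume_head(struct sock *sk)
{
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

        if (skb)
                sk_eat_skb(sk, skb);
}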
2432 
2433 static inline
2434 struct net *sock_net(const struct sock *sk)
2435 {
2436         return read_pnet(&sk->sk_net);
2437 }
2438 
2439 static inline
2440 void sock_net_set(struct sock *sk, struct net *net)
2441 {
2442         write_pnet(&sk->sk_net, net);
2443 }
2444 
2445 static inline struct sock *skb_steal_sock(struct sk_buff *skb)
2446 {
2447         if (skb->sk) {
2448                 struct sock *sk = skb->sk;
2449 
2450                 skb->destructor = NULL;
2451                 skb->sk = NULL;
2452                 return sk;
2453         }
2454         return NULL;
2455 }
2456 
2457 /* This helper checks if a socket is a full socket,
2458  * i.e. _not_ a timewait or request socket.
2459  */
2460 static inline bool sk_fullsock(const struct sock *sk)
2461 {
2462         return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2463 }
2464 
2465 /* Checks if this SKB belongs to an HW offloaded socket
2466  * and whether any SW fallbacks are required based on dev.
2467  */
2468 static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2469                                                    struct net_device *dev)
2470 {
2471 #ifdef CONFIG_SOCK_VALIDATE_XMIT
2472         struct sock *sk = skb->sk;
2473 
2474         if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
2475                 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2476 #endif
2477 
2478         return skb;
2479 }
2480 
2481 /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV socket.
2482  * SYNACK messages can be attached to either one (depending on SYNCOOKIE).
2483  */
2484 static inline bool sk_listener(const struct sock *sk)
2485 {
2486         return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2487 }
2488 
2489 void sock_enable_timestamp(struct sock *sk, int flag);
2490 int sock_get_timestamp(struct sock *, struct timeval __user *);
2491 int sock_get_timestampns(struct sock *, struct timespec __user *);
2492 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2493                        int type);
2494 
2495 bool sk_ns_capable(const struct sock *sk,
2496                    struct user_namespace *user_ns, int cap);
2497 bool sk_capable(const struct sock *sk, int cap);
2498 bool sk_net_capable(const struct sock *sk, int cap);
2499 
2500 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2501 
2502 /* Take into consideration the size of the struct sk_buff overhead in the
2503  * determination of these values, since that is non-constant across
2504  * platforms.  This makes socket queueing behavior and performance
2505  * not depend upon such differences.
2506  */
2507 #define _SK_MEM_PACKETS         256
2508 #define _SK_MEM_OVERHEAD        SKB_TRUESIZE(256)
2509 #define SK_WMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2510 #define SK_RMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2511 
2512 extern __u32 sysctl_wmem_max;
2513 extern __u32 sysctl_rmem_max;
2514 
2515 extern int sysctl_tstamp_allow_data;
2516 extern int sysctl_optmem_max;
2517 
2518 extern __u32 sysctl_wmem_default;
2519 extern __u32 sysctl_rmem_default;
2520 
2521 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
2522 {
2523         /* Does this proto have per netns sysctl_wmem ? */
2524         if (proto->sysctl_wmem_offset)
2525                 return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
2526 
2527         return *proto->sysctl_wmem;
2528 }
2529 
2530 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2531 {
2532         /* Does this proto have per netns sysctl_rmem ? */
2533         if (proto->sysctl_rmem_offset)
2534                 return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
2535 
2536         return *proto->sysctl_rmem;
2537 }
2538 
2539 /* The default TCP Small Queues budget is ~1 ms of data (1 sec >> 10).
2540  * Some wifi drivers need to tweak it to get more chunks.
2541  * They can use this helper from their ndo_start_xmit()
2542  */
2543 static inline void sk_pacing_shift_update(struct sock *sk, int val)
2544 {
2545         if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
2546                 return;
2547         sk->sk_pacing_shift = val;
2548 }
2549 
2550 /* if a socket is bound to a device, check that the given device
2551  * index is either the same or that the socket is bound to an L3
2552  * master device and the given device index is also enslaved to
2553  * that L3 master
2554  */
2555 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
2556 {
2557         int mdif;
2558 
2559         if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
2560                 return true;
2561 
2562         mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
2563         if (mdif && mdif == sk->sk_bound_dev_if)
2564                 return true;
2565 
2566         return false;
2567 }
2568 
2569 #endif  /* _SOCK_H */
2570 
