/*
 *      Definitions for the 'struct sk_buff' memory handlers.
 *
 *      Authors:
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <net/flow_keys.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES - 1)) & \
                                 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)    \
        ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
        SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +                                          \
                         SKB_DATA_ALIGN(sizeof(struct sk_buff)) +      \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* A. Checksumming of received packets by device.
 *
 * NONE: device failed to checksum this packet.
 *      skb->csum is undefined.
 *
 * UNNECESSARY: device parsed the packet and verified the checksum.
 *      skb->csum is undefined.
 *      It is a bad option, but, unfortunately, many vendors do this.
 *      Apparently with the secret goal of selling you a new device
 *      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 * COMPLETE: the most generic way. The device supplied the checksum of _all_
 *      the packet as seen by netif_rx, in skb->csum.
 *      NOTE: even if the device supports only some protocols but is
 *      able to produce some skb->csum, it MUST use COMPLETE,
 *      not UNNECESSARY.
 *
 * PARTIAL: identical to the case for output below. This may occur
 *      on a packet received directly from another Linux OS, e.g.,
 *      a virtualised Linux kernel on the same host. The packet can
 *      be treated in the same way as UNNECESSARY, except that on
 *      output (i.e., forwarding) the checksum must be filled in
 *      by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 * NONE: the skb is checksummed by the protocol, or a checksum is not required.
 *
 * PARTIAL: the device is required to checksum the packet as seen by
 *      hard_start_xmit from skb->csum_start to the end, and to record
 *      the checksum at skb->csum_start + skb->csum_offset.
 *
 *      The device must show its capabilities in dev->features, set up
 *      at device setup time:
 *
 *      NETIF_F_HW_CSUM  - it's a clever device, it is able to checksum
 *                         everything.
 *      NETIF_F_IP_CSUM  - the device is dumb, able to checksum only
 *                         TCP/UDP over IPv4. Sigh. Vendors like it this
 *                         way for some unknown reason. Though, see the
 *                         comment above about CHECKSUM_UNNECESSARY. 8)
 *      NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6 instead.
 *
 * UNNECESSARY: the device will do protocol-specific checksumming. Protocol
 *      drivers that do not want the stack to perform the checksum
 *      calculation should use this flag in their outgoing skbs.
 *
 *      NETIF_F_FCOE_CRC - the device can offload the FCoE FC CRC.
 *                         Correspondingly, the FCoE protocol driver
 *                         stack should use CHECKSUM_UNNECESSARY.
 *
 * Any questions? No questions, good.           --ANK
 */
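
/* Example (illustrative sketch, not part of this header): a driver RX path
 * that lets the hardware compute the full packet checksum would do, roughly:
 *
 *      skb->csum = hw_csum;        (value read from the NIC descriptor; assumed)
 *      skb->ip_summed = CHECKSUM_COMPLETE;
 *      netif_rx(skb);
 *
 * and software finishing a CHECKSUM_PARTIAL skb uses the start/offset pair,
 * as skb_checksum_help() in net/core/dev.c does:
 *
 *      int start = skb_checksum_start_offset(skb);    (defined below)
 *      __wsum csum = skb_checksum(skb, start, skb->len - start, 0);
 *      *(__sum16 *)(skb->data + start + skb->csum_offset) = csum_fold(csum);
 */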
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
        atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
        atomic_t use;
        unsigned int mask;
        struct net_device *physindev;
        struct net_device *physoutdev;
        unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
        /* These two members must be first. */
        struct sk_buff *next;
        struct sk_buff *prev;

        __u32 qlen;
        spinlock_t lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
        struct {
                struct page *p;
        } page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 page_offset;
        __u32 size;
#else
        __u16 page_offset;
        __u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
        return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
        frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
        frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
        frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:   hardware time stamp transformed into duration
 *              since arbitrary point in time
 * @syststamp:  hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations. The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
        ktime_t hwtstamp;
        ktime_t syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
        /* generate hardware time stamp */
        SKBTX_HW_TSTAMP = 1 << 0,

        /* generate software time stamp */
        SKBTX_SW_TSTAMP = 1 << 1,

        /* device driver is going to provide hardware time stamp */
        SKBTX_IN_PROGRESS = 1 << 2,

        /* device driver supports TX zero-copy buffers */
        SKBTX_DEV_ZEROCOPY = 1 << 3,

        /* generate wifi status information (where possible) */
        SKBTX_WIFI_STATUS = 1 << 4,

        /* This indicates at least one fragment might be overwritten
         * (as in vmsplice(), sendfile() ...)
         * If we need to compute a TX checksum, we'll need to copy
         * all frags to avoid possible bad checksum
         */
        SKBTX_SHARED_FRAG = 1 << 5,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be gone by the time
 * this is called. The zerocopy_success argument is true if zero copy
 * transmit occurred, false on a data copy or an out of memory error caused
 * by a data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
        void (*callback)(struct ubuf_info *, bool zerocopy_success);
        void *ctx;
        unsigned long desc;
};
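
/* Example (hedged sketch): a hypothetical completion callback honouring the
 * ubuf_info contract above. "my_ring" and "my_complete_buffer" are made-up
 * names for illustration only.
 *
 *      static void my_zerocopy_done(struct ubuf_info *uarg, bool success)
 *      {
 *              struct my_ring *ring = uarg->ctx;
 *
 *              (uarg->desc identifies which userspace buffer finished)
 *              my_complete_buffer(ring, uarg->desc, success);
 *      }
 */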
/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
        unsigned char nr_frags;
        __u8 tx_flags;
        unsigned short gso_size;
        /* Warning: this field is not always filled in (UFO)! */
        unsigned short gso_segs;
        unsigned short gso_type;
        struct sk_buff *frag_list;
        struct skb_shared_hwtstamps hwtstamps;
        __be32 ip6_frag_id;

        /*
         * Warning: all fields before dataref are cleared in __alloc_skb()
         */
        atomic_t dataref;

        /* Intermediate layers must ensure that destructor_arg
         * remains valid until skb destructor */
        void *destructor_arg;

        /* must be last field, see pskb_expand_head() */
        skb_frag_t frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
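
/* Example (sketch): with the split above, the two counts are recovered as
 *
 *      int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *      int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *      int total_refs = dataref & SKB_DATAREF_MASK;
 *
 * skb_header_cloned() below applies exactly this decomposition.
 */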

enum {
        SKB_FCLONE_UNAVAILABLE,
        SKB_FCLONE_ORIG,
        SKB_FCLONE_CLONE,
};

enum {
        SKB_GSO_TCPV4 = 1 << 0,
        SKB_GSO_UDP = 1 << 1,

        /* This indicates the skb is from an untrusted source. */
        SKB_GSO_DODGY = 1 << 2,

        /* This indicates the tcp segment has CWR set. */
        SKB_GSO_TCP_ECN = 1 << 3,

        SKB_GSO_TCPV6 = 1 << 4,

        SKB_GSO_FCOE = 1 << 5,

        SKB_GSO_GRE = 1 << 6,

        SKB_GSO_UDP_TUNNEL = 1 << 7,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * struct sk_buff - socket buffer
 * @next: Next buffer in list
 * @prev: Previous buffer in list
 * @tstamp: Time we arrived
 * @sk: Socket we are owned by
 * @dev: Device we arrived on/are leaving by
 * @cb: Control buffer. Free for use by every layer. Put private vars here
 * @_skb_refdst: destination entry (with norefcount bit)
 * @sp: the security path, used for xfrm
 * @len: Length of actual data
 * @data_len: Data length
 * @mac_len: Length of link layer header
 * @hdr_len: writable header length of cloned skb
 * @csum: Checksum (must include start/offset pair)
 * @csum_start: Offset from skb->head where checksumming should start
 * @csum_offset: Offset from csum_start where checksum should be stored
 * @priority: Packet queueing priority
 * @local_df: allow local fragmentation
 * @cloned: Head may be cloned (check refcnt to be sure)
 * @ip_summed: Driver fed us an IP checksum
 * @nohdr: Payload reference only, must not modify header
 * @nfctinfo: Relationship of this skb to the connection
 * @pkt_type: Packet class
 * @fclone: skbuff clone status
 * @ipvs_property: skbuff is owned by ipvs
 * @peeked: this packet has been seen already, so stats have been
 *      done for it, don't do them again
 * @nf_trace: netfilter packet trace flag
 * @protocol: Packet protocol from driver
 * @destructor: Destruct function
 * @nfct: Associated connection, if any
 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 * @skb_iif: ifindex of device we arrived on
 * @tc_index: Traffic control index
 * @tc_verd: traffic control verdict
 * @rxhash: the packet hash computed on receive
 * @queue_mapping: Queue mapping for multiqueue devices
 * @ndisc_nodetype: router type (from link layer)
 * @ooo_okay: allow the mapping of a socket to a queue to be changed
 * @l4_rxhash: indicates rxhash is a canonical 4-tuple hash over transport
 *      ports.
 * @wifi_acked_valid: wifi_acked was set
 * @wifi_acked: whether frame was acked on wifi or not
 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 * @dma_cookie: a cookie to one of several possible DMA operations
 *      done by skb DMA functions
 * @secmark: security marking
 * @mark: Generic packet mark
 * @dropcount: total number of sk_receive_queue overflows
 * @vlan_proto: vlan encapsulation protocol
 * @vlan_tci: vlan tag control information
 * @inner_transport_header: Inner transport layer header (encapsulation)
 * @inner_network_header: Network layer header (encapsulation)
 * @inner_mac_header: Link layer header (encapsulation)
 * @transport_header: Transport layer header
 * @network_header: Network layer header
 * @mac_header: Link layer header
 * @tail: Tail pointer
 * @end: End pointer
 * @head: Head of buffer
 * @data: Data head pointer
 * @truesize: Buffer size
 * @users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
        /* These two members must be first. */
        struct sk_buff *next;
        struct sk_buff *prev;

        ktime_t tstamp;

        struct sock *sk;
        struct net_device *dev;

        /*
         * This is the control buffer. It is free to use for every
         * layer. Please put your private variables there. If you
         * want to keep them across layers you have to do a skb_clone()
         * first. This is owned by whoever has the skb queued at the moment.
         */
        char cb[48] __aligned(8);

        unsigned long _skb_refdst;
#ifdef CONFIG_XFRM
        struct sec_path *sp;
#endif
        unsigned int len,
                     data_len;
        __u16 mac_len,
              hdr_len;
        union {
                __wsum csum;
                struct {
                        __u16 csum_start;
                        __u16 csum_offset;
                };
        };
        __u32 priority;
        kmemcheck_bitfield_begin(flags1);
        __u8 local_df:1,
             cloned:1,
             ip_summed:2,
             nohdr:1,
             nfctinfo:3;
        __u8 pkt_type:3,
             fclone:2,
             ipvs_property:1,
             peeked:1,
             nf_trace:1;
        kmemcheck_bitfield_end(flags1);
        __be16 protocol;

        void (*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        struct nf_conntrack *nfct;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        struct nf_bridge_info *nf_bridge;
#endif

        int skb_iif;

        __u32 rxhash;

        __be16 vlan_proto;
        __u16 vlan_tci;

#ifdef CONFIG_NET_SCHED
        __u16 tc_index;         /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
        __u16 tc_verd;          /* traffic control verdict */
#endif
#endif

        __u16 queue_mapping;
        kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8 ndisc_nodetype:2;
#endif
        __u8 pfmemalloc:1;
        __u8 ooo_okay:1;
        __u8 l4_rxhash:1;
        __u8 wifi_acked_valid:1;
        __u8 wifi_acked:1;
        __u8 no_fcs:1;
        __u8 head_frag:1;
        /* Encapsulation protocol and NIC drivers should use
         * this flag to indicate to each other whether the skb contains
         * an encapsulated packet or not, and maybe use the inner packet
         * headers if needed
         */
        __u8 encapsulation:1;
        /* 7/9 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
        dma_cookie_t dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
        __u32 secmark;
#endif
        union {
                __u32 mark;
                __u32 dropcount;
                __u32 reserved_tailroom;
        };

        sk_buff_data_t inner_transport_header;
        sk_buff_data_t inner_network_header;
        sk_buff_data_t inner_mac_header;
        sk_buff_data_t transport_header;
        sk_buff_data_t network_header;
        sk_buff_data_t mac_header;
        /* These elements must be at the end, see alloc_skb() for details. */
        sk_buff_data_t tail;
        sk_buff_data_t end;
        unsigned char *head,
                      *data;
        unsigned int truesize;
        atomic_t users;
};
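
/* Example (common kernel pattern, sketched for illustration): a layer keeps
 * its private per-packet state in skb->cb via a casting macro, much as TCP
 * does with tcp_skb_cb. A hypothetical layer could define:
 *
 *      struct my_skb_cb {
 *              u32 seq;
 *              u8 flags;
 *      };
 *      #define MY_SKB_CB(skb) ((struct my_skb_cb *)&((skb)->cb[0]))
 *
 *      MY_SKB_CB(skb)->seq = 1;
 *
 * The struct must fit in the 48 bytes of cb[], and its contents are only
 * meaningful while the current layer owns the skb.
 */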

#ifdef __KERNEL__
/*
 *      Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE        0x01
#define SKB_ALLOC_RX            0x02

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
        return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if the refcount was _not_ taken
 */
#define SKB_DST_NOREF   1UL
#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of whether a reference was taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
        /* If refdst was not refcounted, check we still are in an
         * rcu_read_lock section
         */
        WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
                !rcu_read_lock_held() &&
                !rcu_read_lock_bh_held());
        return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
        skb->_skb_refdst = (unsigned long)dst;
}

extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
                                bool force);

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If the dst entry is cached, we do not take a reference and dst_release
 * will be avoided by refdst_drop. If the dst entry is not cached, we take
 * a reference, so that the last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
        __skb_dst_set_noref(skb, dst, false);
}

/**
 * skb_dst_set_noref_force - sets skb dst, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is the caller's job to guarantee that the last dst_release
 * for the provided dst happens when nobody uses it, e.g. after an RCU grace
 * period.
 */
static inline void skb_dst_set_noref_force(struct sk_buff *skb,
                                           struct dst_entry *dst)
{
        __skb_dst_set_noref(skb, dst, true);
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
        return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
        return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void kfree_skb_list(struct sk_buff *segs);
extern void skb_tx_error(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                             bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
                                   gfp_t priority, int flags, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
{
        return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
                                               gfp_t priority)
{
        return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
        return __alloc_skb_head(priority, -1);
}

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
                                gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
                                   int headroom, gfp_t gfp_mask);

extern int pskb_expand_head(struct sk_buff *skb,
                            int nhead, int ntail,
                            gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
                                            unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                       int newheadroom, int newtailroom,
                                       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
                        struct scatterlist *sg, int offset,
                        int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
                        struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)        consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                                   int getfrag(void *from, char *to, int offset,
                                               int len, int odd, struct sk_buff *skb),
                                   void *from, int length);

struct skb_seq_state {
        __u32 lower_offset;
        __u32 upper_offset;
        __u32 frag_idx;
        __u32 stepped_offset;
        struct sk_buff *root_skb;
        struct sk_buff *cur_skb;
        __u8 *frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
                                 unsigned int from, unsigned int to,
                                 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
                                 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
                                  unsigned int to, struct ts_config *config,
                                  struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
        if (!skb->l4_rxhash)
                __skb_get_rxhash(skb);

        return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
        return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
        return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
        return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
        return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
        return &skb_shinfo(skb)->hwtstamps;
}

/**
 * skb_queue_empty - check if a queue is empty
 * @list: queue head
 *
 * Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
        return list->next == (struct sk_buff *)list;
}

/**
 * skb_queue_is_last - check if skb is the last entry in the queue
 * @list: queue head
 * @skb: buffer
 *
 * Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
                                     const struct sk_buff *skb)
{
        return skb->next == (struct sk_buff *)list;
}

/**
 * skb_queue_is_first - check if skb is the first entry in the queue
 * @list: queue head
 * @skb: buffer
 *
 * Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
                                      const struct sk_buff *skb)
{
        return skb->prev == (struct sk_buff *)list;
}

/**
 * skb_queue_next - return the next packet in the queue
 * @list: queue head
 * @skb: current buffer
 *
 * Return the next packet in @list after @skb. It is only valid to
 * call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
                                             const struct sk_buff *skb)
{
        /* This BUG_ON may seem severe, but if we just return then we
         * are going to dereference garbage.
         */
        BUG_ON(skb_queue_is_last(list, skb));
        return skb->next;
}

/**
 * skb_queue_prev - return the prev packet in the queue
 * @list: queue head
 * @skb: current buffer
 *
 * Return the prev packet in @list before @skb. It is only valid to
 * call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
                                             const struct sk_buff *skb)
{
        /* This BUG_ON may seem severe, but if we just return then we
         * are going to dereference garbage.
         */
        BUG_ON(skb_queue_is_first(list, skb));
        return skb->prev;
}
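
/* Example (sketch): a forward walk over a queue using the helpers above.
 * The caller must hold the queue lock (or otherwise own the list) for this
 * to be safe; skb_peek() is defined further below, and "inspect" is a
 * placeholder.
 *
 *      struct sk_buff *skb = skb_peek(list);
 *
 *      while (skb) {
 *              inspect(skb);
 *              skb = skb_queue_is_last(list, skb) ?
 *                      NULL : skb_queue_next(list, skb);
 *      }
 */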

/**
 * skb_get - reference buffer
 * @skb: buffer to reference
 *
 * Makes another reference to a socket buffer and returns a pointer
 * to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
        atomic_inc(&skb->users);
        return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 * skb_cloned - is the buffer a clone
 * @skb: buffer to check
 *
 * Returns true if the buffer was generated with skb_clone() and is
 * one of multiple shared copies of the buffer. Cloned buffers are
 * shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
        return skb->cloned &&
               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);

        if (skb_cloned(skb))
                return pskb_expand_head(skb, 0, 0, pri);

        return 0;
}

/**
 * skb_header_cloned - is the header a clone
 * @skb: buffer to check
 *
 * Returns true if modifying the header part of the buffer requires
 * the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
        int dataref;

        if (!skb->cloned)
                return 0;

        dataref = atomic_read(&skb_shinfo(skb)->dataref);
        dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
        return dataref != 1;
}

/**
 * skb_header_release - release reference to header
 * @skb: buffer to operate on
 *
 * Drop a reference to the header part of the buffer. This is done
 * by acquiring a payload reference. You must not read from the header
 * part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
        BUG_ON(skb->nohdr);
        skb->nohdr = 1;
        atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 * skb_shared - is the buffer shared
 * @skb: buffer to check
 *
 * Returns true if more than one person has a reference to this
 * buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
        return atomic_read(&skb->users) != 1;
}

/**
 * skb_share_check - check if buffer is shared and if so clone it
 * @skb: buffer to check
 * @pri: priority for memory allocation
 *
 * If the buffer is shared the buffer is cloned and the old copy
 * drops a reference. A new clone with a single reference is returned.
 * If the buffer is not shared the original buffer is returned. When
 * called from interrupt context or with spinlocks held, @pri must
 * be %GFP_ATOMIC.
 *
 * %NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, pri);

                if (likely(nskb))
                        consume_skb(skb);
                else
                        kfree_skb(skb);
                skb = nskb;
        }
        return skb;
}

/*
 *      Copy shared buffers into a new sk_buff. We effectively do COW on
 *      packets to handle cases where we have a local reader and forwarding,
 *      and a couple of other messy ones. The normal one is tcpdumping
 *      a packet that's being forwarded.
 */

/**
 * skb_unshare - make a copy of a shared buffer
 * @skb: buffer to check
 * @pri: priority for memory allocation
 *
 * If the socket buffer is a clone then this function creates a new
 * copy of the data, drops a reference count on the old copy and returns
 * the new copy with the reference count at 1. If the buffer is not a clone
 * the original buffer is returned. When called with a spinlock held or
 * from interrupt context @pri must be %GFP_ATOMIC
 *
 * %NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
                                          gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_cloned(skb)) {
                struct sk_buff *nskb = skb_copy(skb, pri);
                kfree_skb(skb); /* Free our shared copy */
                skb = nskb;
        }
        return skb;
}

/**
 * skb_peek - peek at the head of an &sk_buff_head
 * @list_: list to peek at
 *
 * Peek an &sk_buff. Unlike most other operations you _MUST_
 * be careful with this one. A peek leaves the buffer on the
 * list and someone else may run off with it. You must hold
 * the appropriate locks or have a private queue to do this.
 *
 * Returns %NULL for an empty list or a pointer to the head element.
 * The reference count is not incremented and the reference is therefore
 * volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
        struct sk_buff *skb = list_->next;

        if (skb == (struct sk_buff *)list_)
                skb = NULL;
        return skb;
}

/**
 * skb_peek_next - peek skb following the given one from a queue
 * @skb: skb to start from
 * @list_: list to peek at
 *
 * Returns %NULL when the end of the list is met or a pointer to the
 * next element. The reference count is not incremented and the
 * reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
                                            const struct sk_buff_head *list_)
{
        struct sk_buff *next = skb->next;

        if (next == (struct sk_buff *)list_)
                next = NULL;
        return next;
}

/**
 * skb_peek_tail - peek at the tail of an &sk_buff_head
 * @list_: list to peek at
 *
 * Peek an &sk_buff. Unlike most other operations you _MUST_
 * be careful with this one. A peek leaves the buffer on the
 * list and someone else may run off with it. You must hold
 * the appropriate locks or have a private queue to do this.
 *
 * Returns %NULL for an empty list or a pointer to the tail element.
 * The reference count is not incremented and the reference is therefore
 * volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
        struct sk_buff *skb = list_->prev;

        if (skb == (struct sk_buff *)list_)
                skb = NULL;
        return skb;
}

/**
 * skb_queue_len - get queue length
 * @list_: list to measure
 *
 * Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
        return list_->qlen;
}
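
/* Example (sketch): peeking safely by holding the queue lock so the head
 * cannot be unlinked underneath us; "inspect" is a placeholder and must
 * neither unlink nor free the buffer:
 *
 *      unsigned long flags;
 *      struct sk_buff *skb;
 *
 *      spin_lock_irqsave(&list->lock, flags);
 *      skb = skb_peek(list);
 *      if (skb)
 *              inspect(skb);
 *      spin_unlock_irqrestore(&list->lock, flags);
 */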

/**
 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 * @list: queue to initialize
 *
 * This initializes only the list and queue length aspects of
 * an sk_buff_head object. This allows initializing the list
 * aspects of an sk_buff_head without reinitializing things like
 * the spinlock. It can also be used for on-stack sk_buff_head
 * objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
        list->prev = list->next = (struct sk_buff *)list;
        list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or the drivers should use annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
        spin_lock_init(&list->lock);
        __skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
                                             struct lock_class_key *class)
{
        skb_queue_head_init(list);
        lockdep_set_class(&list->lock, class);
}

/*
 *      Insert an sk_buff on a list.
 *
 *      The "__skb_xxxx()" functions are the non-atomic ones that
 *      can only be called with interrupts disabled.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
                       struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
{
        newsk->next = next;
        newsk->prev = prev;
        next->prev = prev->next = newsk;
        list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
                                      struct sk_buff *prev,
                                      struct sk_buff *next)
{
        struct sk_buff *first = list->next;
        struct sk_buff *last = list->prev;

        first->prev = prev;
        prev->next = first;

        last->next = next;
        next->prev = last;
}

/**
 * skb_queue_splice - join two skb lists, this is designed for stacks
 * @list: the new list to add
 * @head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
                                    struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
                head->qlen += list->qlen;
        }
}

/**
 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 * @list: the new list to add
 * @head: the place to add it in the first list
 *
 * The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
                                         struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
                head->qlen += list->qlen;
                __skb_queue_head_init(list);
        }
}

/**
 * skb_queue_splice_tail - join two skb lists, each list being a queue
 * @list: the new list to add
 * @head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
                                         struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
                head->qlen += list->qlen;
        }
}
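
/* Example (sketch): draining a shared queue into a private on-stack list
 * so the lock is held only briefly. __skb_dequeue() is defined further
 * below; "process" is a placeholder.
 *
 *      struct sk_buff_head tmp;
 *      struct sk_buff *skb;
 *
 *      __skb_queue_head_init(&tmp);
 *
 *      spin_lock(&queue->lock);
 *      skb_queue_splice_init(queue, &tmp);     (queue is now empty)
 *      spin_unlock(&queue->lock);
 *
 *      while ((skb = __skb_dequeue(&tmp)) != NULL)
 *              process(skb);
 */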

/**
 * skb_queue_splice_tail_init - join two skb lists and reinitialise the
 *      emptied list
 * @list: the new list to add
 * @head: the place to add it in the first list
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
                                              struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
                head->qlen += list->qlen;
                __skb_queue_head_init(list);
        }
}

/**
 * __skb_queue_after - queue a buffer after a given buffer in a list
 * @list: list to use
 * @prev: place after this buffer
 * @newsk: buffer to queue
 *
 * Queue a buffer in the middle of a list. This function takes no locks
 * and you must therefore hold required locks before calling it.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
                                     struct sk_buff *prev,
                                     struct sk_buff *newsk)
{
        __skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
                       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
                                      struct sk_buff *next,
                                      struct sk_buff *newsk)
{
        __skb_insert(newsk, next->prev, next, list);
}

/**
 * __skb_queue_head - queue a buffer at the list head
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the start of a list. This function takes no locks
 * and you must therefore hold required locks before calling it.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
{
        __skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 * __skb_queue_tail - queue a buffer at the list tail
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the end of a list. This function takes no locks
 * and you must therefore hold required locks before calling it.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
{
        __skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
        struct sk_buff *next, *prev;

        list->qlen--;
        next = skb->next;
        prev = skb->prev;
        skb->next = skb->prev = NULL;
        next->prev = prev;
        prev->next = next;
}

/**
 * __skb_dequeue - remove from the head of the queue
 * @list: list to dequeue from
 *
 * Remove the head of the list. This function does not take any locks
 * so must be used with appropriate locks held only. The head item is
 * returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek(list);
        if (skb)
                __skb_unlink(skb, list);
        return skb;
}

/**
 * __skb_dequeue_tail - remove from the tail of the queue
 * @list: list to dequeue from
 *
 * Remove the tail of the list. This function does not take any locks
 * so must be used with appropriate locks held only. The tail item is
 * returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek_tail(list);
        if (skb)
                __skb_unlink(skb, list);
        return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
        return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
        return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
        int i, len = 0;

        for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
                len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
        return len + skb_headlen(skb);
}

static inline bool skb_has_frags(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->nr_frags;
}
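
/* Example (sketch): the invariant behind the helpers above. For any skb,
 * skb->len == skb_headlen(skb) + skb->data_len, where skb_headlen() is the
 * linear part and data_len covers page frags and the frag_list. Code that
 * wants to read N bytes directly through skb->data must therefore first do
 *
 *      if (!pskb_may_pull(skb, N))
 *              goto drop;      (packet shorter than N, or no memory)
 *
 * (pskb_may_pull() is defined later in this file.)
 */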

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
                                        struct page *page, int off, int size)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        /*
         * Propagate page->pfmemalloc to the skb if we can. The problem is
         * that not all callers have unique ownership of the page. If
         * pfmemalloc is set, we check the mapping as a mapping implies
         * page->index is set (index and pfmemalloc share space).
         * If it's a valid mapping, we cannot use page->pfmemalloc but we
         * do not lose pfmemalloc information as the pages would not be
         * allocated using __GFP_MEMALLOC.
         */
        frag->page.p = page;
        frag->page_offset = off;
        skb_frag_size_set(frag, size);

        page = compound_head(page);
        if (page->pfmemalloc && !page->mapping)
                skb->pfmemalloc = true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition it updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
                                      struct page *page, int off, int size)
{
        __skb_fill_page_desc(skb, i, page, off, size);
        skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
                            int off, int size, unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
        return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
        skb_reset_tail_pointer(skb);
        skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
        return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
        skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *      Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len += len;
        return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data -= len;
        skb->len += len;
        return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
        skb->len -= len;
        BUG_ON(skb->len < skb->data_len);
        return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
        if (len > skb_headlen(skb) &&
            !__pskb_pull_tail(skb, len - skb_headlen(skb)))
                return NULL;
        skb->len -= len;
        return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
        return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
        if (likely(len <= skb_headlen(skb)))
                return 1;
        if (unlikely(len > skb->len))
                return 0;
        return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 * skb_headroom - bytes at buffer head
 * @skb: buffer to check
 *
 * Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
        return skb->data - skb->head;
}

/**
 * skb_tailroom - bytes at buffer end
 * @skb: buffer to check
 *
 * Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 * skb_availroom - bytes at buffer end
 * @skb: buffer to check
 *
 * Return the number of bytes of free space at the tail of an sk_buff
 * allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
        if (skb_is_nonlinear(skb))
                return 0;

        return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 * skb_reserve - adjust headroom
 * @skb: buffer to alter
 * @len: bytes to move
 *
 * Increase the headroom of an empty &sk_buff by reducing the tail
 * room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
        skb->data += len;
        skb->tail += len;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
        skb->inner_mac_header = skb->mac_header;
        skb->inner_network_header = skb->network_header;
        skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
        skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
                                                        *skb)
{
        return skb->head + skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
        skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
                                                  const int offset)
{
        skb_reset_inner_transport_header(skb);
        skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
        return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
        skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
                                                const int offset)
{
        skb_reset_inner_network_header(skb);
        skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
        return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
        skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
                                            const int offset)
{
        skb_reset_inner_mac_header(skb);
        skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
        return skb->transport_header != ~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
        return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
        skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
                                            const int offset)
{
        skb_reset_transport_header(skb);
        skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
        return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
        skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
        skb_reset_network_header(skb);
        skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
        return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
        return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
        skb_reset_mac_header(skb);
        skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
                                                        *skb)
{
        return skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
        skb->inner_transport_header = skb->data;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
                                                  const int offset)
{
        skb->inner_transport_header = skb->data + offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
        return skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
        skb->inner_network_header = skb->data;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
                                                const int offset)
{
        skb->inner_network_header = skb->data + offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
        return skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
        skb->inner_mac_header = skb->data;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
                                            const int offset)
{
        skb->inner_mac_header = skb->data + offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
        return skb->transport_header != NULL;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
        return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
        skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
                                            const int offset)
{
        skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
        return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
        skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
        skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
        return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
        return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
        skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
                                              const int offset_hint)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return;
        else if (skb_flow_dissect(skb, &keys))
                skb_set_transport_header(skb, keys.thoff);
        else
                skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
        if (skb_mac_header_was_set(skb)) {
                const unsigned char *old_mac = skb_mac_header(skb);

                skb_set_mac_header(skb, -skb->mac_len);
                memmove(skb_mac_header(skb), old_mac, skb->mac_len);
        }
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
        return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
        return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
        return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
        return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
        return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
        return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
        return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
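
/* Example (sketch): how the offsets above are typically established on
 * receive. After the link layer header has been pulled, an IPv4 handler
 * would do roughly the following ("iph" is a local struct iphdr pointer;
 * ip_hdr() comes from <linux/ip.h>):
 *
 *      skb_reset_network_header(skb);          (skb->data is at the IP header)
 *      iph = ip_hdr(skb);
 *      skb_set_transport_header(skb, iph->ihl * 4);
 *
 * after which skb_network_header_len(skb) equals the IP header length and
 * skb_transport_header(skb) points at the TCP/UDP header.
 */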
This is used to avoid having to reallocate skb data when 1834 * the header has to grow. In the default case, if the header has to grow 1835 * 32 bytes or less, we avoid the reallocation. 1836 * 1837 * Unfortunately this headroom changes the DMA alignment of the resulting 1838 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive 1839 * on some architectures. An architecture can override this value, 1840 * perhaps setting it to a cacheline in size (since that will maintain 1841 * cacheline alignment of the DMA). It must be a power of 2. 1842 * 1843 * Various parts of the networking layer expect at least 32 bytes of 1844 * headroom; you should not reduce this. 1845 * 1846 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) 1847 * to reduce the average number of cache lines per packet. 1848 * get_rps_cpus(), for example, only accesses one 64-byte-aligned block: 1849 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) 1850 */ 1851 #ifndef NET_SKB_PAD 1852 #define NET_SKB_PAD max(32, L1_CACHE_BYTES) 1853 #endif 1854 1855 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1856 1857 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 1858 { 1859 if (unlikely(skb_is_nonlinear(skb))) { 1860 WARN_ON(1); 1861 return; 1862 } 1863 skb->len = len; 1864 skb_set_tail_pointer(skb, len); 1865 } 1866 1867 extern void skb_trim(struct sk_buff *skb, unsigned int len); 1868 1869 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 1870 { 1871 if (skb->data_len) 1872 return ___pskb_trim(skb, len); 1873 __skb_trim(skb, len); 1874 return 0; 1875 } 1876 1877 static inline int pskb_trim(struct sk_buff *skb, unsigned int len) 1878 { 1879 return (len < skb->len) ? __pskb_trim(skb, len) : 0; 1880 } 1881 1882 /** 1883 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer 1884 * @skb: buffer to alter 1885 * @len: new length 1886 * 1887 * This is identical to pskb_trim except that the caller knows that 1888 * the skb is not cloned so we should never get an error due to out- 1889 * of-memory. 1890 */ 1891 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) 1892 { 1893 int err = pskb_trim(skb, len); 1894 BUG_ON(err); 1895 } 1896 1897 /** 1898 * skb_orphan - orphan a buffer 1899 * @skb: buffer to orphan 1900 * 1901 * If a buffer currently has an owner then we call the owner's 1902 * destructor function and make the @skb unowned. The buffer continues 1903 * to exist but is no longer charged to its former owner. 1904 */ 1905 static inline void skb_orphan(struct sk_buff *skb) 1906 { 1907 if (skb->destructor) 1908 skb->destructor(skb); 1909 skb->destructor = NULL; 1910 skb->sk = NULL; 1911 } 1912 1913 /** 1914 * skb_orphan_frags - orphan the frags contained in a buffer 1915 * @skb: buffer to orphan frags from 1916 * @gfp_mask: allocation mask for replacement pages 1917 * 1918 * For each frag in the SKB which needs a destructor (i.e. has an 1919 * owner) create a copy of that frag and release the original 1920 * page by calling the destructor. 1921 */ 1922 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) 1923 { 1924 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))) 1925 return 0; 1926 return skb_copy_ubufs(skb, gfp_mask); 1927 } 1928 1929 /** 1930 * __skb_queue_purge - empty a list 1931 * @list: list to empty 1932 * 1933 * Delete all buffers on an &sk_buff list. Each buffer is removed from 1934 * the list and one reference dropped.
This function does not take the 1935 * list lock and the caller must hold the relevant locks to use it. 1936 */ 1937 extern void skb_queue_purge(struct sk_buff_head *list); 1938 static inline void __skb_queue_purge(struct sk_buff_head *list) 1939 { 1940 struct sk_buff *skb; 1941 while ((skb = __skb_dequeue(list)) != NULL) 1942 kfree_skb(skb); 1943 } 1944 1945 #define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768) 1946 #define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) 1947 #define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE 1948 1949 extern void *netdev_alloc_frag(unsigned int fragsz); 1950 1951 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 1952 unsigned int length, 1953 gfp_t gfp_mask); 1954 1955 /** 1956 * netdev_alloc_skb - allocate an skbuff for rx on a specific device 1957 * @dev: network device to receive on 1958 * @length: length to allocate 1959 * 1960 * Allocate a new &sk_buff and assign it a usage count of one. The 1961 * buffer has unspecified headroom built in. Users should allocate 1962 * the headroom they think they need without accounting for the 1963 * built in space. The built in space is used for optimisations. 1964 * 1965 * %NULL is returned if there is no free memory. Although this function 1966 * allocates memory it can be called from an interrupt. 1967 */ 1968 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, 1969 unsigned int length) 1970 { 1971 return __netdev_alloc_skb(dev, length, GFP_ATOMIC); 1972 } 1973 1974 /* legacy helper around __netdev_alloc_skb() */ 1975 static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1976 gfp_t gfp_mask) 1977 { 1978 return __netdev_alloc_skb(NULL, length, gfp_mask); 1979 } 1980 1981 /* legacy helper around netdev_alloc_skb() */ 1982 static inline struct sk_buff *dev_alloc_skb(unsigned int length) 1983 { 1984 return netdev_alloc_skb(NULL, length); 1985 } 1986 1987 1988 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, 1989 unsigned int length, gfp_t gfp) 1990 { 1991 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); 1992 1993 if (NET_IP_ALIGN && skb) 1994 skb_reserve(skb, NET_IP_ALIGN); 1995 return skb; 1996 } 1997 1998 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, 1999 unsigned int length) 2000 { 2001 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); 2002 } 2003 2004 /** 2005 * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data 2006 * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX 2007 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used 2008 * @order: size of the allocation 2009 * 2010 * Allocate pages. 2011 * 2012 * %NULL is returned if there is no free memory. 2013 */ 2014 static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, 2015 struct sk_buff *skb, 2016 unsigned int order) 2017 { 2018 struct page *page; 2019 2020 gfp_mask |= __GFP_COLD; 2021 2022 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2023 gfp_mask |= __GFP_MEMALLOC; 2024 2025 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); 2026 if (skb && page && page->pfmemalloc) 2027 skb->pfmemalloc = true; 2028 2029 return page; 2030 } 2031 2032 /** 2033 * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data 2034 * @gfp_mask: alloc_pages_node mask.
Set __GFP_NOMEMALLOC if not for network packet RX 2035 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used 2036 * 2037 * Allocate a new page. 2038 * 2039 * %NULL is returned if there is no free memory. 2040 */ 2041 static inline struct page *__skb_alloc_page(gfp_t gfp_mask, 2042 struct sk_buff *skb) 2043 { 2044 return __skb_alloc_pages(gfp_mask, skb, 0); 2045 } 2046 2047 /** 2048 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page 2049 * @page: The page that was allocated from skb_alloc_page 2050 * @skb: The skb that may need pfmemalloc set 2051 */ 2052 static inline void skb_propagate_pfmemalloc(struct page *page, 2053 struct sk_buff *skb) 2054 { 2055 if (page && page->pfmemalloc) 2056 skb->pfmemalloc = true; 2057 } 2058 2059 /** 2060 * skb_frag_page - retrieve the page referred to by a paged fragment 2061 * @frag: the paged fragment 2062 * 2063 * Returns the &struct page associated with @frag. 2064 */ 2065 static inline struct page *skb_frag_page(const skb_frag_t *frag) 2066 { 2067 return frag->page.p; 2068 } 2069 2070 /** 2071 * __skb_frag_ref - take an additional reference on a paged fragment. 2072 * @frag: the paged fragment 2073 * 2074 * Takes an additional reference on the paged fragment @frag. 2075 */ 2076 static inline void __skb_frag_ref(skb_frag_t *frag) 2077 { 2078 get_page(skb_frag_page(frag)); 2079 } 2080 2081 /** 2082 * skb_frag_ref - take an additional reference on a paged fragment of an skb. 2083 * @skb: the buffer 2084 * @f: the fragment offset. 2085 * 2086 * Takes an additional reference on the @f'th paged fragment of @skb. 2087 */ 2088 static inline void skb_frag_ref(struct sk_buff *skb, int f) 2089 { 2090 __skb_frag_ref(&skb_shinfo(skb)->frags[f]); 2091 } 2092 2093 /** 2094 * __skb_frag_unref - release a reference on a paged fragment. 2095 * @frag: the paged fragment 2096 * 2097 * Releases a reference on the paged fragment @frag. 2098 */ 2099 static inline void __skb_frag_unref(skb_frag_t *frag) 2100 { 2101 put_page(skb_frag_page(frag)); 2102 } 2103 2104 /** 2105 * skb_frag_unref - release a reference on a paged fragment of an skb. 2106 * @skb: the buffer 2107 * @f: the fragment offset 2108 * 2109 * Releases a reference on the @f'th paged fragment of @skb. 2110 */ 2111 static inline void skb_frag_unref(struct sk_buff *skb, int f) 2112 { 2113 __skb_frag_unref(&skb_shinfo(skb)->frags[f]); 2114 } 2115 2116 /** 2117 * skb_frag_address - gets the address of the data contained in a paged fragment 2118 * @frag: the paged fragment buffer 2119 * 2120 * Returns the address of the data within @frag. The page must already 2121 * be mapped. 2122 */ 2123 static inline void *skb_frag_address(const skb_frag_t *frag) 2124 { 2125 return page_address(skb_frag_page(frag)) + frag->page_offset; 2126 } 2127 2128 /** 2129 * skb_frag_address_safe - gets the address of the data contained in a paged fragment 2130 * @frag: the paged fragment buffer 2131 * 2132 * Returns the address of the data within @frag. Checks that the page 2133 * is mapped and returns %NULL otherwise. 2134 */ 2135 static inline void *skb_frag_address_safe(const skb_frag_t *frag) 2136 { 2137 void *ptr = page_address(skb_frag_page(frag)); 2138 if (unlikely(!ptr)) 2139 return NULL; 2140 2141 return ptr + frag->page_offset; 2142 } 2143 2144 /** 2145 * __skb_frag_set_page - sets the page contained in a paged fragment 2146 * @frag: the paged fragment 2147 * @page: the page to set 2148 * 2149 * Sets the fragment @frag to contain @page.
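 *
 * A minimal sketch of filling the first fragment of an skb; the skb,
 * page and len names are assumed here, and error handling is omitted:
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *
 *	__skb_frag_set_page(frag, page);
 *	frag->page_offset = 0;
 *	skb_frag_size_set(frag, len);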
2150 */ 2151 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) 2152 { 2153 frag->page.p = page; 2154 } 2155 2156 /** 2157 * skb_frag_set_page - sets the page contained in a paged fragment of an skb 2158 * @skb: the buffer 2159 * @f: the fragment offset 2160 * @page: the page to set 2161 * 2162 * Sets the @f'th fragment of @skb to contain @page. 2163 */ 2164 static inline void skb_frag_set_page(struct sk_buff *skb, int f, 2165 struct page *page) 2166 { 2167 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); 2168 } 2169 2170 /** 2171 * skb_frag_dma_map - maps a paged fragment via the DMA API 2172 * @dev: the device to map the fragment to 2173 * @frag: the paged fragment to map 2174 * @offset: the offset within the fragment (starting at the 2175 * fragment's own offset) 2176 * @size: the number of bytes to map 2177 * @dir: the direction of the mapping (%PCI_DMA_*) 2178 * 2179 * Maps the page associated with @frag to @dev. 2180 */ 2181 static inline dma_addr_t skb_frag_dma_map(struct device *dev, 2182 const skb_frag_t *frag, 2183 size_t offset, size_t size, 2184 enum dma_data_direction dir) 2185 { 2186 return dma_map_page(dev, skb_frag_page(frag), 2187 frag->page_offset + offset, size, dir); 2188 } 2189 2190 static inline struct sk_buff *pskb_copy(struct sk_buff *skb, 2191 gfp_t gfp_mask) 2192 { 2193 return __pskb_copy(skb, skb_headroom(skb), gfp_mask); 2194 } 2195 2196 /** 2197 * skb_clone_writable - is the header of a clone writable 2198 * @skb: buffer to check 2199 * @len: length up to which to write 2200 * 2201 * Returns true if modifying the header part of the cloned buffer 2202 * does not require the data to be copied. 2203 */ 2204 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) 2205 { 2206 return !skb_header_cloned(skb) && 2207 skb_headroom(skb) + len <= skb->hdr_len; 2208 } 2209 2210 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, 2211 int cloned) 2212 { 2213 int delta = 0; 2214 2215 if (headroom > skb_headroom(skb)) 2216 delta = headroom - skb_headroom(skb); 2217 2218 if (delta || cloned) 2219 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, 2220 GFP_ATOMIC); 2221 return 0; 2222 } 2223 2224 /** 2225 * skb_cow - copy header of skb when it is required 2226 * @skb: buffer to cow 2227 * @headroom: needed headroom 2228 * 2229 * If the skb passed lacks sufficient headroom or its data part 2230 * is shared, data is reallocated. If reallocation fails, an error 2231 * is returned and original skb is not changed. 2232 * 2233 * The result is skb with writable area skb->head...skb->tail 2234 * and at least @headroom of space at head. 2235 */ 2236 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) 2237 { 2238 return __skb_cow(skb, headroom, skb_cloned(skb)); 2239 } 2240 2241 /** 2242 * skb_cow_head - skb_cow but only making the head writable 2243 * @skb: buffer to cow 2244 * @headroom: needed headroom 2245 * 2246 * This function is identical to skb_cow except that we replace the 2247 * skb_cloned check by skb_header_cloned. It should be used when 2248 * you only need to push on some header and do not need to modify 2249 * the data.
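 *
 * A typical (illustrative) use is to guarantee writable headroom
 * before prepending a header; hlen and the error path are assumed:
 *
 *	if (skb_cow_head(skb, hlen))
 *		goto drop;
 *	ptr = skb_push(skb, hlen);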
2250 */ 2251 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) 2252 { 2253 return __skb_cow(skb, headroom, skb_header_cloned(skb)); 2254 } 2255 2256 /** 2257 * skb_padto - pad an skbuff up to a minimal size 2258 * @skb: buffer to pad 2259 * @len: minimal length 2260 * 2261 * Pads up a buffer to ensure the trailing bytes exist and are 2262 * blanked. If the buffer already contains sufficient data it 2263 * is untouched. Otherwise it is extended. Returns zero on 2264 * success. The skb is freed on error. 2265 */ 2266 2267 static inline int skb_padto(struct sk_buff *skb, unsigned int len) 2268 { 2269 unsigned int size = skb->len; 2270 if (likely(size >= len)) 2271 return 0; 2272 return skb_pad(skb, len - size); 2273 } 2274 2275 static inline int skb_add_data(struct sk_buff *skb, 2276 char __user *from, int copy) 2277 { 2278 const int off = skb->len; 2279 2280 if (skb->ip_summed == CHECKSUM_NONE) { 2281 int err = 0; 2282 __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy), 2283 copy, 0, &err); 2284 if (!err) { 2285 skb->csum = csum_block_add(skb->csum, csum, off); 2286 return 0; 2287 } 2288 } else if (!copy_from_user(skb_put(skb, copy), from, copy)) 2289 return 0; 2290 2291 __skb_trim(skb, off); 2292 return -EFAULT; 2293 } 2294 2295 static inline bool skb_can_coalesce(struct sk_buff *skb, int i, 2296 const struct page *page, int off) 2297 { 2298 if (i) { 2299 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 2300 2301 return page == skb_frag_page(frag) && 2302 off == frag->page_offset + skb_frag_size(frag); 2303 } 2304 return false; 2305 } 2306 2307 static inline int __skb_linearize(struct sk_buff *skb) 2308 { 2309 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; 2310 } 2311 2312 /** 2313 * skb_linearize - convert paged skb to linear one 2314 * @skb: buffer to linearize 2315 * 2316 * If there is no free memory -ENOMEM is returned, otherwise zero 2317 * is returned and the old skb data released. 2318 */ 2319 static inline int skb_linearize(struct sk_buff *skb) 2320 { 2321 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; 2322 } 2323 2324 /** 2325 * skb_has_shared_frag - can any frag be overwritten 2326 * @skb: buffer to test 2327 * 2328 * Return true if the skb has at least one frag that might be modified 2329 * by an external entity (as in vmsplice()/sendfile()) 2330 */ 2331 static inline bool skb_has_shared_frag(const struct sk_buff *skb) 2332 { 2333 return skb_is_nonlinear(skb) && 2334 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2335 } 2336 2337 /** 2338 * skb_linearize_cow - make sure skb is linear and writable 2339 * @skb: buffer to process 2340 * 2341 * If there is no free memory -ENOMEM is returned, otherwise zero 2342 * is returned and the old skb data released. 2343 */ 2344 static inline int skb_linearize_cow(struct sk_buff *skb) 2345 { 2346 return skb_is_nonlinear(skb) || skb_cloned(skb) ? 2347 __skb_linearize(skb) : 0; 2348 } 2349 2350 /** 2351 * skb_postpull_rcsum - update checksum for received skb after pull 2352 * @skb: buffer to update 2353 * @start: start of data before pull 2354 * @len: length of data pulled 2355 * 2356 * After doing a pull on a received packet, you need to call this to 2357 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to 2358 * CHECKSUM_NONE so that it can be recomputed from scratch.
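 *
 * A sketch of the usual calling sequence, where hlen is an assumed
 * header length that has already been validated with pskb_may_pull():
 *
 *	const void *start = skb->data;
 *
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);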
2359 */ 2360 2361 static inline void skb_postpull_rcsum(struct sk_buff *skb, 2362 const void *start, unsigned int len) 2363 { 2364 if (skb->ip_summed == CHECKSUM_COMPLETE) 2365 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2366 else if (skb->ip_summed == CHECKSUM_PARTIAL && 2367 skb_checksum_start_offset(skb) < 0) 2368 skb->ip_summed = CHECKSUM_NONE; 2369 } 2370 2371 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2372 2373 /** 2374 * pskb_trim_rcsum - trim received skb and update checksum 2375 * @skb: buffer to trim 2376 * @len: new length 2377 * 2378 * This is exactly the same as pskb_trim except that it ensures the 2379 * checksum of received packets is still valid after the operation. 2380 */ 2381 2382 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) 2383 { 2384 if (likely(len >= skb->len)) 2385 return 0; 2386 if (skb->ip_summed == CHECKSUM_COMPLETE) 2387 skb->ip_summed = CHECKSUM_NONE; 2388 return __pskb_trim(skb, len); 2389 } 2390 2391 #define skb_queue_walk(queue, skb) \ 2392 for (skb = (queue)->next; \ 2393 skb != (struct sk_buff *)(queue); \ 2394 skb = skb->next) 2395 2396 #define skb_queue_walk_safe(queue, skb, tmp) \ 2397 for (skb = (queue)->next, tmp = skb->next; \ 2398 skb != (struct sk_buff *)(queue); \ 2399 skb = tmp, tmp = skb->next) 2400 2401 #define skb_queue_walk_from(queue, skb) \ 2402 for (; skb != (struct sk_buff *)(queue); \ 2403 skb = skb->next) 2404 2405 #define skb_queue_walk_from_safe(queue, skb, tmp) \ 2406 for (tmp = skb->next; \ 2407 skb != (struct sk_buff *)(queue); \ 2408 skb = tmp, tmp = skb->next) 2409 2410 #define skb_queue_reverse_walk(queue, skb) \ 2411 for (skb = (queue)->prev; \ 2412 skb != (struct sk_buff *)(queue); \ 2413 skb = skb->prev) 2414 2415 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \ 2416 for (skb = (queue)->prev, tmp = skb->prev; \ 2417 skb != (struct sk_buff *)(queue); \ 2418 skb = tmp, tmp = skb->prev) 2419 2420 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ 2421 for (tmp = skb->prev; \ 2422 skb != (struct sk_buff *)(queue); \ 2423 skb = tmp, tmp = skb->prev) 2424 2425 static inline bool skb_has_frag_list(const struct sk_buff *skb) 2426 { 2427 return skb_shinfo(skb)->frag_list != NULL; 2428 } 2429 2430 static inline void skb_frag_list_init(struct sk_buff *skb) 2431 { 2432 skb_shinfo(skb)->frag_list = NULL; 2433 } 2434 2435 static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag) 2436 { 2437 frag->next = skb_shinfo(skb)->frag_list; 2438 skb_shinfo(skb)->frag_list = frag; 2439 } 2440 2441 #define skb_walk_frags(skb, iter) \ 2442 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 2443 2444 extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 2445 int *peeked, int *off, int *err); 2446 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 2447 int noblock, int *err); 2448 extern unsigned int datagram_poll(struct file *file, struct socket *sock, 2449 struct poll_table_struct *wait); 2450 extern int skb_copy_datagram_iovec(const struct sk_buff *from, 2451 int offset, struct iovec *to, 2452 int size); 2453 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, 2454 int hlen, 2455 struct iovec *iov); 2456 extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, 2457 int offset, 2458 const struct iovec *from, 2459 int from_offset, 2460 int len); 2461 extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, 2462 int offset, 2463 const struct iovec *to, 2464
int to_offset, 2465 int size); 2466 extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2467 extern void skb_free_datagram_locked(struct sock *sk, 2468 struct sk_buff *skb); 2469 extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, 2470 unsigned int flags); 2471 extern __wsum skb_checksum(const struct sk_buff *skb, int offset, 2472 int len, __wsum csum); 2473 extern int skb_copy_bits(const struct sk_buff *skb, int offset, 2474 void *to, int len); 2475 extern int skb_store_bits(struct sk_buff *skb, int offset, 2476 const void *from, int len); 2477 extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, 2478 int offset, u8 *to, int len, 2479 __wsum csum); 2480 extern int skb_splice_bits(struct sk_buff *skb, 2481 unsigned int offset, 2482 struct pipe_inode_info *pipe, 2483 unsigned int len, 2484 unsigned int flags); 2485 extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2486 extern void skb_split(struct sk_buff *skb, 2487 struct sk_buff *skb1, const u32 len); 2488 extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, 2489 int shiftlen); 2490 2491 extern struct sk_buff *skb_segment(struct sk_buff *skb, 2492 netdev_features_t features); 2493 2494 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); 2495 2496 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2497 int len, void *buffer) 2498 { 2499 int hlen = skb_headlen(skb); 2500 2501 if (hlen - offset >= len) 2502 return skb->data + offset; 2503 2504 if (skb_copy_bits(skb, offset, buffer, len) < 0) 2505 return NULL; 2506 2507 return buffer; 2508 } 2509 2510 static inline void skb_copy_from_linear_data(const struct sk_buff *skb, 2511 void *to, 2512 const unsigned int len) 2513 { 2514 memcpy(to, skb->data, len); 2515 } 2516 2517 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, 2518 const int offset, void *to, 2519 const unsigned int len) 2520 { 2521 memcpy(to, skb->data + offset, len); 2522 } 2523 2524 static inline void skb_copy_to_linear_data(struct sk_buff *skb, 2525 const void *from, 2526 const unsigned int len) 2527 { 2528 memcpy(skb->data, from, len); 2529 } 2530 2531 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, 2532 const int offset, 2533 const void *from, 2534 const unsigned int len) 2535 { 2536 memcpy(skb->data + offset, from, len); 2537 } 2538 2539 extern void skb_init(void); 2540 2541 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) 2542 { 2543 return skb->tstamp; 2544 } 2545 2546 /** 2547 * skb_get_timestamp - get timestamp from a skb 2548 * @skb: skb to get stamp from 2549 * @stamp: pointer to struct timeval to store stamp in 2550 * 2551 * Timestamps are stored in the skb as offsets to a base timestamp. 2552 * This function converts the offset back to a struct timeval and stores 2553 * it in stamp. 
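 *
 * For example (names assumed), to fetch the receive time of a packet:
 *
 *	struct timeval tv;
 *
 *	skb_get_timestamp(skb, &tv);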
2554 */ 2555 static inline void skb_get_timestamp(const struct sk_buff *skb, 2556 struct timeval *stamp) 2557 { 2558 *stamp = ktime_to_timeval(skb->tstamp); 2559 } 2560 2561 static inline void skb_get_timestampns(const struct sk_buff *skb, 2562 struct timespec *stamp) 2563 { 2564 *stamp = ktime_to_timespec(skb->tstamp); 2565 } 2566 2567 static inline void __net_timestamp(struct sk_buff *skb) 2568 { 2569 skb->tstamp = ktime_get_real(); 2570 } 2571 2572 static inline ktime_t net_timedelta(ktime_t t) 2573 { 2574 return ktime_sub(ktime_get_real(), t); 2575 } 2576 2577 static inline ktime_t net_invalid_timestamp(void) 2578 { 2579 return ktime_set(0, 0); 2580 } 2581 2582 extern void skb_timestamping_init(void); 2583 2584 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 2585 2586 extern void skb_clone_tx_timestamp(struct sk_buff *skb); 2587 extern bool skb_defer_rx_timestamp(struct sk_buff *skb); 2588 2589 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ 2590 2591 static inline void skb_clone_tx_timestamp(struct sk_buff *skb) 2592 { 2593 } 2594 2595 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) 2596 { 2597 return false; 2598 } 2599 2600 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ 2601 2602 /** 2603 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps 2604 * 2605 * PHY drivers may accept clones of transmitted packets for 2606 * timestamping via their phy_driver.txtstamp method. These drivers 2607 * must call this function to return the skb back to the stack, with 2608 * or without a timestamp. 2609 * 2610 * @skb: clone of the original outgoing packet 2611 * @hwtstamps: hardware time stamps, may be NULL if not available 2612 * 2613 */ 2614 void skb_complete_tx_timestamp(struct sk_buff *skb, 2615 struct skb_shared_hwtstamps *hwtstamps); 2616 2617 /** 2618 * skb_tstamp_tx - queue clone of skb with send time stamps 2619 * @orig_skb: the original outgoing packet 2620 * @hwtstamps: hardware time stamps, may be NULL if not available 2621 * 2622 * If the skb has a socket associated, then this function clones the 2623 * skb (thus sharing the actual data and optional structures), stores 2624 * the optional hardware time stamping information (if non-NULL) or 2625 * generates a software time stamp (otherwise), then queues the clone 2626 * to the error queue of the socket. Errors are silently ignored. 2627 */ 2628 extern void skb_tstamp_tx(struct sk_buff *orig_skb, 2629 struct skb_shared_hwtstamps *hwtstamps); 2630 2631 static inline void sw_tx_timestamp(struct sk_buff *skb) 2632 { 2633 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && 2634 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 2635 skb_tstamp_tx(skb, NULL); 2636 } 2637 2638 /** 2639 * skb_tx_timestamp() - Driver hook for transmit timestamping 2640 * 2641 * Ethernet MAC Drivers should call this function in their hard_xmit() 2642 * function immediately before giving the sk_buff to the MAC hardware. 2643 * 2644 * @skb: A socket buffer.
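 *
 * A sketch of the call site in a driver's transmit path; the hardware
 * hand-off below is hypothetical:
 *
 *	skb_tx_timestamp(skb);
 *	my_hw_submit_tx(ring, skb);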
2645 */ 2646 static inline void skb_tx_timestamp(struct sk_buff *skb) 2647 { 2648 skb_clone_tx_timestamp(skb); 2649 sw_tx_timestamp(skb); 2650 } 2651 2652 /** 2653 * skb_complete_wifi_ack - deliver skb with wifi status 2654 * 2655 * @skb: the original outgoing packet 2656 * @acked: ack status 2657 * 2658 */ 2659 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); 2660 2661 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 2662 extern __sum16 __skb_checksum_complete(struct sk_buff *skb); 2663 2664 static inline int skb_csum_unnecessary(const struct sk_buff *skb) 2665 { 2666 return skb->ip_summed & CHECKSUM_UNNECESSARY; 2667 } 2668 2669 /** 2670 * skb_checksum_complete - Calculate checksum of an entire packet 2671 * @skb: packet to process 2672 * 2673 * This function calculates the checksum over the entire packet plus 2674 * the value of skb->csum. The latter can be used to supply the 2675 * checksum of a pseudo header as used by TCP/UDP. It returns the 2676 * checksum. 2677 * 2678 * For protocols that contain complete checksums such as ICMP/TCP/UDP, 2679 * this function can be used to verify the checksum on received 2680 * packets. In that case the function should return zero if the 2681 * checksum is correct. In particular, this function will return zero 2682 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the 2683 * hardware has already verified the correctness of the checksum. 2684 */ 2685 static inline __sum16 skb_checksum_complete(struct sk_buff *skb) 2686 { 2687 return skb_csum_unnecessary(skb) ? 2688 0 : __skb_checksum_complete(skb); 2689 } 2690 2691 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2692 extern void nf_conntrack_destroy(struct nf_conntrack *nfct); 2693 static inline void nf_conntrack_put(struct nf_conntrack *nfct) 2694 { 2695 if (nfct && atomic_dec_and_test(&nfct->use)) 2696 nf_conntrack_destroy(nfct); 2697 } 2698 static inline void nf_conntrack_get(struct nf_conntrack *nfct) 2699 { 2700 if (nfct) 2701 atomic_inc(&nfct->use); 2702 } 2703 #endif 2704 #ifdef CONFIG_BRIDGE_NETFILTER 2705 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) 2706 { 2707 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) 2708 kfree(nf_bridge); 2709 } 2710 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) 2711 { 2712 if (nf_bridge) 2713 atomic_inc(&nf_bridge->use); 2714 } 2715 #endif /* CONFIG_BRIDGE_NETFILTER */ 2716 static inline void nf_reset(struct sk_buff *skb) 2717 { 2718 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2719 nf_conntrack_put(skb->nfct); 2720 skb->nfct = NULL; 2721 #endif 2722 #ifdef CONFIG_BRIDGE_NETFILTER 2723 nf_bridge_put(skb->nf_bridge); 2724 skb->nf_bridge = NULL; 2725 #endif 2726 } 2727 2728 static inline void nf_reset_trace(struct sk_buff *skb) 2729 { 2730 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 2731 skb->nf_trace = 0; 2732 #endif 2733 } 2734 2735 /* Note: This doesn't put any conntrack and bridge info in dst.
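 * Callers that may already hold such references in dst are expected to
 * drop them first, as nf_copy() below does before calling __nf_copy().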
*/ 2736 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) 2737 { 2738 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2739 dst->nfct = src->nfct; 2740 nf_conntrack_get(src->nfct); 2741 dst->nfctinfo = src->nfctinfo; 2742 #endif 2743 #ifdef CONFIG_BRIDGE_NETFILTER 2744 dst->nf_bridge = src->nf_bridge; 2745 nf_bridge_get(src->nf_bridge); 2746 #endif 2747 } 2748 2749 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) 2750 { 2751 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2752 nf_conntrack_put(dst->nfct); 2753 #endif 2754 #ifdef CONFIG_BRIDGE_NETFILTER 2755 nf_bridge_put(dst->nf_bridge); 2756 #endif 2757 __nf_copy(dst, src); 2758 } 2759 2760 #ifdef CONFIG_NETWORK_SECMARK 2761 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 2762 { 2763 to->secmark = from->secmark; 2764 } 2765 2766 static inline void skb_init_secmark(struct sk_buff *skb) 2767 { 2768 skb->secmark = 0; 2769 } 2770 #else 2771 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 2772 { } 2773 2774 static inline void skb_init_secmark(struct sk_buff *skb) 2775 { } 2776 #endif 2777 2778 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) 2779 { 2780 skb->queue_mapping = queue_mapping; 2781 } 2782 2783 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) 2784 { 2785 return skb->queue_mapping; 2786 } 2787 2788 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 2789 { 2790 to->queue_mapping = from->queue_mapping; 2791 } 2792 2793 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) 2794 { 2795 skb->queue_mapping = rx_queue + 1; 2796 } 2797 2798 static inline u16 skb_get_rx_queue(const struct sk_buff *skb) 2799 { 2800 return skb->queue_mapping - 1; 2801 } 2802 2803 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) 2804 { 2805 return skb->queue_mapping != 0; 2806 } 2807 2808 extern u16 __skb_tx_hash(const struct net_device *dev, 2809 const struct sk_buff *skb, 2810 unsigned int num_tx_queues); 2811 2812 #ifdef CONFIG_XFRM 2813 static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 2814 { 2815 return skb->sp; 2816 } 2817 #else 2818 static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 2819 { 2820 return NULL; 2821 } 2822 #endif 2823 2824 /* Keeps track of mac header offset relative to skb->head. 2825 * It is useful for TSO of tunneling protocols, e.g. GRE. 2826 * For non-tunnel skb it points to skb_mac_header() and for 2827 * tunnel skb it points to outer mac header.
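 * skb_tnl_header_len() below derives the tunnel header length from this
 * offset, and gso_pskb_expand_head() keeps it consistent when the
 * headroom is reallocated.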
*/ 2828 struct skb_gso_cb { 2829 int mac_offset; 2830 }; 2831 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) 2832 2833 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) 2834 { 2835 return (skb_mac_header(inner_skb) - inner_skb->head) - 2836 SKB_GSO_CB(inner_skb)->mac_offset; 2837 } 2838 2839 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) 2840 { 2841 int new_headroom, headroom; 2842 int ret; 2843 2844 headroom = skb_headroom(skb); 2845 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); 2846 if (ret) 2847 return ret; 2848 2849 new_headroom = skb_headroom(skb); 2850 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); 2851 return 0; 2852 } 2853 2854 static inline bool skb_is_gso(const struct sk_buff *skb) 2855 { 2856 return skb_shinfo(skb)->gso_size; 2857 } 2858 2859 static inline bool skb_is_gso_v6(const struct sk_buff *skb) 2860 { 2861 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 2862 } 2863 2864 extern void __skb_warn_lro_forwarding(const struct sk_buff *skb); 2865 2866 static inline bool skb_warn_if_lro(const struct sk_buff *skb) 2867 { 2868 /* LRO sets gso_size but not gso_type, whereas if GSO is really 2869 * wanted then gso_type will be set. */ 2870 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2871 2872 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && 2873 unlikely(shinfo->gso_type == 0)) { 2874 __skb_warn_lro_forwarding(skb); 2875 return true; 2876 } 2877 return false; 2878 } 2879 2880 static inline void skb_forward_csum(struct sk_buff *skb) 2881 { 2882 /* Unfortunately we don't support this one. Any brave souls? */ 2883 if (skb->ip_summed == CHECKSUM_COMPLETE) 2884 skb->ip_summed = CHECKSUM_NONE; 2885 } 2886 2887 /** 2888 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE 2889 * @skb: skb to check 2890 * 2891 * Fresh skbs have their ip_summed set to CHECKSUM_NONE. 2892 * Instead of forcing ip_summed to CHECKSUM_NONE, we can 2893 * use this helper to document places where we make this assertion. 2894 */ 2895 static inline void skb_checksum_none_assert(const struct sk_buff *skb) 2896 { 2897 #ifdef DEBUG 2898 BUG_ON(skb->ip_summed != CHECKSUM_NONE); 2899 #endif 2900 } 2901 2902 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2903 2904 u32 __skb_get_poff(const struct sk_buff *skb); 2905 2906 /** 2907 * skb_head_is_locked - Determine if the skb->head is locked down 2908 * @skb: skb to check 2909 * 2910 * The head on skbs built around a head frag can be removed if they are 2911 * not cloned. This function returns true if the skb head is locked down 2912 * due to either being allocated via kmalloc, or by being a clone with 2913 * multiple references to the head. 2914 */ 2915 static inline bool skb_head_is_locked(const struct sk_buff *skb) 2916 { 2917 return !skb->head_frag || skb_cloned(skb); 2918 } 2919 2920 /** 2921 * skb_gso_network_seglen - Return length of individual segments of a gso packet 2922 * 2923 * @skb: GSO skb 2924 * 2925 * skb_gso_network_seglen is used to determine the real size of the 2926 * individual segments, including L3 (IP, IPv6) and L4 (TCP/UDP) headers. 2927 * 2928 * The MAC/L2 header is not accounted for. 2929 */ 2930 static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 2931 { 2932 unsigned int hdr_len = skb_transport_header(skb) - 2933 skb_network_header(skb); 2934 return hdr_len + skb_gso_transport_seglen(skb); 2935 } 2936 #endif /* __KERNEL__ */ 2937 #endif /* _LINUX_SKBUFF_H */ 2938