
TOMOYO Linux Cross Reference
Linux/include/linux/skbuff.h


  1 /*
  2  *      Definitions for the 'struct sk_buff' memory handlers.
  3  *
  4  *      Authors:
  5  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  6  *              Florian La Roche, <rzsfl@rz.uni-sb.de>
  7  *
  8  *      This program is free software; you can redistribute it and/or
  9  *      modify it under the terms of the GNU General Public License
 10  *      as published by the Free Software Foundation; either version
 11  *      2 of the License, or (at your option) any later version.
 12  */
 13 
 14 #ifndef _LINUX_SKBUFF_H
 15 #define _LINUX_SKBUFF_H
 16 
 17 #include <linux/config.h>
 18 #include <linux/kernel.h>
 19 #include <linux/compiler.h>
 20 #include <linux/time.h>
 21 #include <linux/cache.h>
 22 
 23 #include <asm/atomic.h>
 24 #include <asm/types.h>
 25 #include <linux/spinlock.h>
 26 #include <linux/mm.h>
 27 #include <linux/highmem.h>
 28 #include <linux/poll.h>
 29 #include <linux/net.h>
 30 
 31 #define HAVE_ALLOC_SKB          /* For the drivers to know */
 32 #define HAVE_ALIGNABLE_SKB      /* Ditto 8)                */
 33 #define SLAB_SKB                /* Slabified skbuffs       */
 34 
 35 #define CHECKSUM_NONE 0
 36 #define CHECKSUM_HW 1
 37 #define CHECKSUM_UNNECESSARY 2
 38 
 39 #define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES - 1)) & \
 40                                  ~(SMP_CACHE_BYTES - 1))
 41 #define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \
 42                                   sizeof(struct skb_shared_info)) & \
 43                                   ~(SMP_CACHE_BYTES - 1))
 44 #define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
 45 #define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))
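
/*
 * Editor's note - worked example, not part of the original header.
 * Assuming SMP_CACHE_BYTES == 32, SKB_DATA_ALIGN rounds a size up to
 * the next cache-line multiple:
 *
 *      SKB_DATA_ALIGN(100) == (100 + 31) & ~31 == 128
 *
 * so the struct skb_shared_info placed after the data always starts
 * on a cache-line boundary.
 */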
 46 
 47 /* A. Checksumming of received packets by device.
 48  *
 49  *      NONE: device failed to checksum this packet.
 50  *              skb->csum is undefined.
 51  *
 52  *      UNNECESSARY: device parsed the packet and verified the checksum.
 53  *              skb->csum is undefined.
 54  *            It is a bad option, but, unfortunately, many vendors do this.
 55  *            Apparently with the secret goal of selling you a new device
 56  *            when you add a new protocol to your host. E.g. IPv6. 8)
 57  *
 58  *      HW: the most generic way. The device supplied the checksum of
 59  *          the _whole_ packet, as seen by netif_rx, in skb->csum.
 60  *          NOTE: Even if a device supports only some protocols but
 61  *          is able to produce some skb->csum, it MUST use HW,
 62  *          not UNNECESSARY.
 63  *
 64  * B. Checksumming on output.
 65  *
 66  *      NONE: the skb is checksummed by the protocol, or no csum is required.
 67  *
 68  *      HW: device is required to csum packet as seen by hard_start_xmit
 69  *      from skb->h.raw to the end and to record the checksum
 70  *      at skb->h.raw+skb->csum.
 71  *
 72  *      Device must show its capabilities in dev->features, set
 73  *      at device setup time.
 74  *      NETIF_F_HW_CSUM - a clever device; it is able to checksum
 75  *                        everything.
 76  *      NETIF_F_NO_CSUM - loopback or reliable single-hop media.
 77  *      NETIF_F_IP_CSUM - the device is dumb. It is able to csum only
 78  *                        TCP/UDP over IPv4. Sigh. Vendors like it this
 79  *                        way for some unknown reason. Though, see the
 80  *                        comment above about CHECKSUM_UNNECESSARY. 8)
 81  *
 82  *      Any questions? No questions, good.              --ANK
 83  */
 84 
 85 #ifdef __i386__
 86 #define NET_CALLER(arg) (*(((void **)&arg) - 1))
 87 #else
 88 #define NET_CALLER(arg) __builtin_return_address(0)
 89 #endif
 90 
 91 #ifdef CONFIG_NETFILTER
 92 struct nf_conntrack {
 93         atomic_t use;
 94         void (*destroy)(struct nf_conntrack *);
 95 };
 96 
 97 struct nf_ct_info {
 98         struct nf_conntrack *master;
 99 };
100 
101 #ifdef CONFIG_BRIDGE_NETFILTER
102 struct nf_bridge_info {
103         atomic_t use;
104         struct net_device *physindev;
105         struct net_device *physoutdev;
106 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
107         struct net_device *netoutdev;
108 #endif
109         unsigned int mask;
110         unsigned long hh[32 / sizeof(unsigned long)];
111 };
112 #endif
113 
114 #endif
115 
116 struct sk_buff_head {
117         /* These two members must be first. */
118         struct sk_buff  *next;
119         struct sk_buff  *prev;
120 
121         __u32           qlen;
122         spinlock_t      lock;
123 };
124 
125 struct sk_buff;
126 
127 /* To allow a 64K frame to be packed as a single skb without a frag_list */
128 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
129 
130 typedef struct skb_frag_struct skb_frag_t;
131 
132 struct skb_frag_struct {
133         struct page *page;
134         __u16 page_offset;
135         __u16 size;
136 };
137 
138 /* This data is invariant across clones and lives at
139  * the end of the header data, i.e. at skb->end.
140  */
141 struct skb_shared_info {
142         atomic_t        dataref;
143         unsigned int    nr_frags;
144         unsigned short  tso_size;
145         unsigned short  tso_segs;
146         struct sk_buff  *frag_list;
147         skb_frag_t      frags[MAX_SKB_FRAGS];
148 };
149 
150 /** 
151  *      struct sk_buff - socket buffer
152  *      @next: Next buffer in list
153  *      @prev: Previous buffer in list
154  *      @list: List we are on
155  *      @sk: Socket we are owned by
156  *      @stamp: Time we arrived
157  *      @dev: Device we arrived on/are leaving by
158  *      @real_dev: The real device we are using
159  *      @h: Transport layer header
160  *      @nh: Network layer header
161  *      @mac: Link layer header
162  *      @dst: FIXME: Describe this field
163  *      @cb: Control buffer. Free for use by every layer. Put private vars here
164  *      @len: Length of actual data
165  *      @data_len: Data length
166  *      @csum: Checksum
167  *      @__unused: Dead field, may be reused
168  *      @cloned: Head may be cloned (check refcnt to be sure)
169  *      @pkt_type: Packet class
170  *      @ip_summed: Driver fed us an IP checksum
171  *      @priority: Packet queueing priority
172  *      @users: User count - see {datagram,tcp}.c
173  *      @protocol: Packet protocol from driver
174  *      @security: Security level of packet
175  *      @truesize: Buffer size 
176  *      @head: Head of buffer
177  *      @data: Data head pointer
178  *      @tail: Tail pointer
179  *      @end: End pointer
180  *      @destructor: Destruct function
181  *      @nfmark: Can be used for communication between hooks
182  *      @nfcache: Cache info
183  *      @nfct: Associated connection, if any
184  *      @nf_debug: Netfilter debugging
185  *      @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
186  *      @private: Data which is private to the HIPPI implementation
187  *      @tc_index: Traffic control index
188  */
189 
190 struct sk_buff {
191         /* These two members must be first. */
192         struct sk_buff          *next;
193         struct sk_buff          *prev;
194 
195         struct sk_buff_head     *list;
196         struct sock             *sk;
197         struct timeval          stamp;
198         struct net_device       *dev;
199         struct net_device       *real_dev;
200 
201         union {
202                 struct tcphdr   *th;
203                 struct udphdr   *uh;
204                 struct icmphdr  *icmph;
205                 struct igmphdr  *igmph;
206                 struct iphdr    *ipiph;
207                 unsigned char   *raw;
208         } h;
209 
210         union {
211                 struct iphdr    *iph;
212                 struct ipv6hdr  *ipv6h;
213                 struct arphdr   *arph;
214                 unsigned char   *raw;
215         } nh;
216 
217         union {
218                 struct ethhdr   *ethernet;
219                 unsigned char   *raw;
220         } mac;
221 
222         struct  dst_entry       *dst;
223         struct  sec_path        *sp;
224 
225         /*
226          * This is the control buffer. It is free to use for every
227          * layer. Please put your private variables there. If you
228          * want to keep them across layers you have to do a skb_clone()
229          * first. This is owned by whoever has the skb queued ATM.
230          */
231         char                    cb[48];
232 
233         unsigned int            len,
234                                 data_len,
235                                 csum;
236         unsigned char           local_df,
237                                 cloned,
238                                 pkt_type,
239                                 ip_summed;
240         __u32                   priority;
241         unsigned short          protocol,
242                                 security;
243 
244         void                    (*destructor)(struct sk_buff *skb);
245 #ifdef CONFIG_NETFILTER
246         unsigned long           nfmark;
247         __u32                   nfcache;
248         struct nf_ct_info       *nfct;
249 #ifdef CONFIG_NETFILTER_DEBUG
250         unsigned int            nf_debug;
251 #endif
252 #ifdef CONFIG_BRIDGE_NETFILTER
253         struct nf_bridge_info   *nf_bridge;
254 #endif
255 #endif /* CONFIG_NETFILTER */
256 #if defined(CONFIG_HIPPI)
257         union {
258                 __u32           ifield;
259         } private;
260 #endif
261 #ifdef CONFIG_NET_SCHED
262         __u32                   tc_index;               /* traffic control index */
263 #endif
264 
265         /* These elements must be at the end, see alloc_skb() for details.  */
266         unsigned int            truesize;
267         atomic_t                users;
268         unsigned char           *head,
269                                 *data,
270                                 *tail,
271                                 *end;
272 };
273 
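/*
 * Editor's sketch (not part of the original header; the names are
 * invented): how a hypothetical driver receive path might apply the
 * CHECKSUM_* rules documented near the top of this file.
 */
static inline void example_rx_checksum(struct sk_buff *skb,
                                       int hw_ok, unsigned int hw_sum)
{
        if (hw_ok) {
                /* Device summed the whole packet as seen by netif_rx. */
                skb->ip_summed = CHECKSUM_HW;
                skb->csum = hw_sum;
        } else {
                /* Let the stack verify the checksum in software. */
                skb->ip_summed = CHECKSUM_NONE;
        }
}
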
274 #define SK_WMEM_MAX     65535
275 #define SK_RMEM_MAX     65535
276 
277 #ifdef __KERNEL__
278 /*
279  *      Handling routines are only of interest to the kernel
280  */
281 #include <linux/slab.h>
282 
283 #include <asm/system.h>
284 
285 extern void            __kfree_skb(struct sk_buff *skb);
286 extern struct sk_buff *alloc_skb(unsigned int size, int priority);
287 extern void            kfree_skbmem(struct sk_buff *skb);
288 extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
289 extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
290 extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
291 extern int             pskb_expand_head(struct sk_buff *skb,
292                                         int nhead, int ntail, int gfp_mask);
293 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
294                                             unsigned int headroom);
295 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
296                                        int newheadroom, int newtailroom,
297                                        int priority);
298 extern struct sk_buff *         skb_pad(struct sk_buff *skb, int pad);
299 #define dev_kfree_skb(a)        kfree_skb(a)
300 extern void           skb_over_panic(struct sk_buff *skb, int len,
301                                      void *here);
302 extern void           skb_under_panic(struct sk_buff *skb, int len,
303                                       void *here);
304 
305 /* Internal */
306 #define skb_shinfo(SKB)         ((struct skb_shared_info *)((SKB)->end))
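
/*
 * Editor's sketch (illustration only; the helper name is invented):
 * skb_shinfo() exposes the shared info that lives just past the data
 * buffer, e.g. to ask whether an skb carries page fragments.
 */
static inline int example_has_frags(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->nr_frags != 0;
}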
307 
308 /**
309  *      skb_queue_empty - check if a queue is empty
310  *      @list: queue head
311  *
312  *      Returns true if the queue is empty, false otherwise.
313  */
314 static inline int skb_queue_empty(const struct sk_buff_head *list)
315 {
316         return list->next == (struct sk_buff *)list;
317 }
318 
319 /**
320  *      skb_get - reference buffer
321  *      @skb: buffer to reference
322  *
323  *      Makes another reference to a socket buffer and returns a pointer
324  *      to the buffer.
325  */
326 static inline struct sk_buff *skb_get(struct sk_buff *skb)
327 {
328         atomic_inc(&skb->users);
329         return skb;
330 }
331 
332 /*
333  * If users == 1, we are the only owner and can avoid a redundant
334  * atomic operation.
335  */
336 
337 /**
338  *      kfree_skb - free an sk_buff
339  *      @skb: buffer to free
340  *
341  *      Drop a reference to the buffer and free it if the usage count has
342  *      hit zero.
343  */
344 static inline void kfree_skb(struct sk_buff *skb)
345 {
346         if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
347                 __kfree_skb(skb);
348 }
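
/*
 * Editor's sketch (illustration only; names are invented): a subsystem
 * that stores an skb beyond its caller's lifetime takes its own
 * reference with skb_get() and drops it later with kfree_skb().
 */
static inline void example_stash(struct sk_buff **slot, struct sk_buff *skb)
{
        *slot = skb_get(skb);           /* users++, buffer stays alive */
}

static inline void example_unstash(struct sk_buff **slot)
{
        kfree_skb(*slot);               /* users--, freed when it hits zero */
        *slot = NULL;
}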
349 
350 /* Use this if you didn't touch the skb state [for fast switching] */
351 static inline void kfree_skb_fast(struct sk_buff *skb)
352 {
353         if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
354                 kfree_skbmem(skb);
355 }
356 
357 /**
358  *      skb_cloned - is the buffer a clone
359  *      @skb: buffer to check
360  *
361  *      Returns true if the buffer was generated with skb_clone() and is
362  *      one of multiple shared copies of the buffer. Cloned buffers are
363  *      shared data so must not be written to under normal circumstances.
364  */
365 static inline int skb_cloned(const struct sk_buff *skb)
366 {
367         return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
368 }
369 
370 /**
371  *      skb_shared - is the buffer shared
372  *      @skb: buffer to check
373  *
374  *      Returns true if more than one person has a reference to this
375  *      buffer.
376  */
377 static inline int skb_shared(const struct sk_buff *skb)
378 {
379         return atomic_read(&skb->users) != 1;
380 }
381 
382 /**
383  *      skb_share_check - check if buffer is shared and if so clone it
384  *      @skb: buffer to check
385  *      @pri: priority for memory allocation
386  *
387  *      If the buffer is shared the buffer is cloned and the old copy
388  *      drops a reference. A new clone with a single reference is returned.
389  *      If the buffer is not shared the original buffer is returned. When
390  *      being called from interrupt status or with spinlocks held pri must
391  *      be GFP_ATOMIC.
392  *
393  *      NULL is returned on a memory allocation failure.
394  */
395 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
396 {
397         might_sleep_if(pri & __GFP_WAIT);
398         if (skb_shared(skb)) {
399                 struct sk_buff *nskb = skb_clone(skb, pri);
400                 kfree_skb(skb);
401                 skb = nskb;
402         }
403         return skb;
404 }
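
/*
 * Editor's sketch (illustration only; the name is invented, -ENOMEM is
 * assumed from <linux/errno.h>): a receive handler that wants to
 * modify the skb makes sure it owns it first.  GFP_ATOMIC because
 * receive processing runs in softirq context.
 */
static inline int example_rx_modify(struct sk_buff *skb)
{
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;         /* our reference is already dropped */
        /* ... safe to modify skb fields here ... */
        kfree_skb(skb);
        return 0;
}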
405 
406 /*
407  *      Copy shared buffers into a new sk_buff. We effectively do COW on
408  *      packets to handle cases where we have both a local reader and a
409  *      forwarder, and a couple of other messy cases. The usual one is
410  *      tcpdumping a packet that is being forwarded.
411  */
412 
413 /**
414  *      skb_unshare - make a copy of a shared buffer
415  *      @skb: buffer to check
416  *      @pri: priority for memory allocation
417  *
418  *      If the socket buffer is a clone then this function creates a new
419  *      copy of the data, drops a reference count on the old copy and returns
420  *      the new copy with the reference count at 1. If the buffer is not a clone
421  *      the original buffer is returned. When called with a spinlock held or
422  *      from interrupt context, @pri must be %GFP_ATOMIC.
423  *
424  *      %NULL is returned on a memory allocation failure.
425  */
426 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
427 {
428         might_sleep_if(pri & __GFP_WAIT);
429         if (skb_cloned(skb)) {
430                 struct sk_buff *nskb = skb_copy(skb, pri);
431                 kfree_skb(skb); /* Free our shared copy */
432                 skb = nskb;
433         }
434         return skb;
435 }
436 
437 /**
438  *      skb_peek
439  *      @list_: list to peek at
440  *
441  *      Peek an &sk_buff. Unlike most other operations you _MUST_
442  *      be careful with this one. A peek leaves the buffer on the
443  *      list and someone else may run off with it. You must hold
444  *      the appropriate locks or have a private queue to do this.
445  *
446  *      Returns %NULL for an empty list or a pointer to the head element.
447  *      The reference count is not incremented and the reference is therefore
448  *      volatile. Use with caution.
449  */
450 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
451 {
452         struct sk_buff *list = ((struct sk_buff *)list_)->next;
453         if (list == (struct sk_buff *)list_)
454                 list = NULL;
455         return list;
456 }
457 
458 /**
459  *      skb_peek_tail
460  *      @list_: list to peek at
461  *
462  *      Peek an &sk_buff. Unlike most other operations you _MUST_
463  *      be careful with this one. A peek leaves the buffer on the
464  *      list and someone else may run off with it. You must hold
465  *      the appropriate locks or have a private queue to do this.
466  *
467  *      Returns %NULL for an empty list or a pointer to the tail element.
468  *      The reference count is not incremented and the reference is therefore
469  *      volatile. Use with caution.
470  */
471 static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
472 {
473         struct sk_buff *list = ((struct sk_buff *)list_)->prev;
474         if (list == (struct sk_buff *)list_)
475                 list = NULL;
476         return list;
477 }
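
/*
 * Editor's sketch (illustration only; the name is invented): peek
 * safely by holding the queue lock, so the head cannot be unlinked
 * while we look at it.
 */
static inline unsigned int example_peek_len(struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned int len = 0;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        skb = skb_peek(q);
        if (skb)
                len = skb->len;
        spin_unlock_irqrestore(&q->lock, flags);
        return len;
}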
478 
479 /**
480  *      skb_queue_len   - get queue length
481  *      @list_: list to measure
482  *
483  *      Return the length of an &sk_buff queue.
484  */
485 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
486 {
487         return list_->qlen;
488 }
489 
490 static inline void skb_queue_head_init(struct sk_buff_head *list)
491 {
492         spin_lock_init(&list->lock);
493         list->prev = list->next = (struct sk_buff *)list;
494         list->qlen = 0;
495 }
496 
497 /*
498  *      Insert an sk_buff at the start of a list.
499  *
500  *      The "__skb_xxxx()" functions are the non-atomic ones that
501  *      can only be called with interrupts disabled.
502  */
503 
504 /**
505  *      __skb_queue_head - queue a buffer at the list head
506  *      @list: list to use
507  *      @newsk: buffer to queue
508  *
509  *      Queue a buffer at the start of a list. This function takes no locks
510  *      and you must therefore hold required locks before calling it.
511  *
512  *      A buffer cannot be placed on two lists at the same time.
513  */
514 static inline void __skb_queue_head(struct sk_buff_head *list,
515                                     struct sk_buff *newsk)
516 {
517         struct sk_buff *prev, *next;
518 
519         newsk->list = list;
520         list->qlen++;
521         prev = (struct sk_buff *)list;
522         next = prev->next;
523         newsk->next = next;
524         newsk->prev = prev;
525         next->prev  = prev->next = newsk;
526 }
527 
528 
529 /**
530  *      skb_queue_head - queue a buffer at the list head
531  *      @list: list to use
532  *      @newsk: buffer to queue
533  *
534  *      Queue a buffer at the start of the list. This function takes the
535  *      list lock and can be used safely with other locking &sk_buff
536  *      functions.
537  *
538  *      A buffer cannot be placed on two lists at the same time.
539  */
540 static inline void skb_queue_head(struct sk_buff_head *list,
541                                   struct sk_buff *newsk)
542 {
543         unsigned long flags;
544 
545         spin_lock_irqsave(&list->lock, flags);
546         __skb_queue_head(list, newsk);
547         spin_unlock_irqrestore(&list->lock, flags);
548 }
549 
550 /**
551  *      __skb_queue_tail - queue a buffer at the list tail
552  *      @list: list to use
553  *      @newsk: buffer to queue
554  *
555  *      Queue a buffer at the end of a list. This function takes no locks
556  *      and you must therefore hold required locks before calling it.
557  *
558  *      A buffer cannot be placed on two lists at the same time.
559  */
560 static inline void __skb_queue_tail(struct sk_buff_head *list,
561                                    struct sk_buff *newsk)
562 {
563         struct sk_buff *prev, *next;
564 
565         newsk->list = list;
566         list->qlen++;
567         next = (struct sk_buff *)list;
568         prev = next->prev;
569         newsk->next = next;
570         newsk->prev = prev;
571         next->prev  = prev->next = newsk;
572 }
573 
574 /**
575  *      skb_queue_tail - queue a buffer at the list tail
576  *      @list: list to use
577  *      @newsk: buffer to queue
578  *
579  *      Queue a buffer at the tail of the list. This function takes the
580  *      list lock and can be used safely with other locking &sk_buff
581  *      functions.
582  *
583  *      A buffer cannot be placed on two lists at the same time.
584  */
585 static inline void skb_queue_tail(struct sk_buff_head *list,
586                                   struct sk_buff *newsk)
587 {
588         unsigned long flags;
589 
590         spin_lock_irqsave(&list->lock, flags);
591         __skb_queue_tail(list, newsk);
592         spin_unlock_irqrestore(&list->lock, flags);
593 }
594 
595 /**
596  *      __skb_dequeue - remove from the head of the queue
597  *      @list: list to dequeue from
598  *
599  *      Remove the head of the list. This function does not take any locks
600  *      so must be used with appropriate locks held only. The head item is
601  *      returned or %NULL if the list is empty.
602  */
603 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
604 {
605         struct sk_buff *next, *prev, *result;
606 
607         prev = (struct sk_buff *) list;
608         next = prev->next;
609         result = NULL;
610         if (next != prev) {
611                 result       = next;
612                 next         = next->next;
613                 list->qlen--;
614                 next->prev   = prev;
615                 prev->next   = next;
616                 result->next = result->prev = NULL;
617                 result->list = NULL;
618         }
619         return result;
620 }
621 
622 /**
623  *      skb_dequeue - remove from the head of the queue
624  *      @list: list to dequeue from
625  *
626  *      Remove the head of the list. The list lock is taken so the function
627  *      may be used safely with other locking list functions. The head item is
628  *      returned or %NULL if the list is empty.
629  */
630 
631 static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
632 {
633         unsigned long flags;
634         struct sk_buff *result;
635 
636         spin_lock_irqsave(&list->lock, flags);
637         result = __skb_dequeue(list);
638         spin_unlock_irqrestore(&list->lock, flags);
639         return result;
640 }
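
/*
 * Editor's sketch (illustration only; the name is invented): a minimal
 * FIFO round trip.  skb_queue_tail() and skb_dequeue() take the queue
 * lock themselves, so no extra locking is needed here.
 */
static inline void example_fifo_demo(struct sk_buff *skb)
{
        struct sk_buff_head q;
        struct sk_buff *out;

        skb_queue_head_init(&q);
        skb_queue_tail(&q, skb);        /* locked enqueue at the tail */
        out = skb_dequeue(&q);          /* locked dequeue from the head */
        if (out)
                kfree_skb(out);
}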
641 
642 /*
643  *      Insert a packet on a list.
644  */
645 
646 static inline void __skb_insert(struct sk_buff *newsk,
647                                 struct sk_buff *prev, struct sk_buff *next,
648                                 struct sk_buff_head *list)
649 {
650         newsk->next = next;
651         newsk->prev = prev;
652         next->prev  = prev->next = newsk;
653         newsk->list = list;
654         list->qlen++;
655 }
656 
657 /**
658  *      skb_insert      -       insert a buffer
659  *      @old: buffer to insert before
660  *      @newsk: buffer to insert
661  *
662  *      Place a packet before a given packet in a list. The list locks are taken
663  *      and this function is atomic with respect to other list locked calls.
664  *      A buffer cannot be placed on two lists at the same time.
665  */
666 
667 static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
668 {
669         unsigned long flags;
670 
671         spin_lock_irqsave(&old->list->lock, flags);
672         __skb_insert(newsk, old->prev, old, old->list);
673         spin_unlock_irqrestore(&old->list->lock, flags);
674 }
675 
676 /*
677  *      Place a packet after a given packet in a list.
678  */
679 
680 static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
681 {
682         __skb_insert(newsk, old, old->next, old->list);
683 }
684 
685 /**
686  *      skb_append      -       append a buffer
687  *      @old: buffer to insert after
688  *      @newsk: buffer to insert
689  *
690  *      Place a packet after a given packet in a list. The list locks are taken
691  *      and this function is atomic with respect to other list locked calls.
692  *      A buffer cannot be placed on two lists at the same time.
693  */
694 
695 
696 static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
697 {
698         unsigned long flags;
699 
700         spin_lock_irqsave(&old->list->lock, flags);
701         __skb_append(old, newsk);
702         spin_unlock_irqrestore(&old->list->lock, flags);
703 }
704 
705 /*
706  * Remove an sk_buff from a list. _Must_ be called atomically, and with
707  * the list known.
708  */
709 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
710 {
711         struct sk_buff *next, *prev;
712 
713         list->qlen--;
714         next       = skb->next;
715         prev       = skb->prev;
716         skb->next  = skb->prev = NULL;
717         skb->list  = NULL;
718         next->prev = prev;
719         prev->next = next;
720 }
721 
722 /**
723  *      skb_unlink      -       remove a buffer from a list
724  *      @skb: buffer to remove
725  *
726  *      Remove a buffer from the list it is on. The list lock is taken
727  *      and this function is atomic with respect to other list locked calls.
728  *
729  *      Works even without knowing the list it is sitting on, which can be
730  *      handy at times. It also means that THE LIST MUST EXIST when you
731  *      unlink. Thus a list must have its contents unlinked before it is
732  *      destroyed.
733  */
734 static inline void skb_unlink(struct sk_buff *skb)
735 {
736         struct sk_buff_head *list = skb->list;
737 
738         if (list) {
739                 unsigned long flags;
740 
741                 spin_lock_irqsave(&list->lock, flags);
742                 if (skb->list == list)
743                         __skb_unlink(skb, skb->list);
744                 spin_unlock_irqrestore(&list->lock, flags);
745         }
746 }
747 
748 /* XXX: more streamlined implementation */
749 
750 /**
751  *      __skb_dequeue_tail - remove from the tail of the queue
752  *      @list: list to dequeue from
753  *
754  *      Remove the tail of the list. This function does not take any locks
755  *      so must be used with appropriate locks held only. The tail item is
756  *      returned or %NULL if the list is empty.
757  */
758 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
759 {
760         struct sk_buff *skb = skb_peek_tail(list);
761         if (skb)
762                 __skb_unlink(skb, list);
763         return skb;
764 }
765 
766 /**
767  *      skb_dequeue_tail - remove from the tail of the queue
768  *      @list: list to dequeue from
769  *
770  *      Remove the tail of the list. The list lock is taken so the function
771  *      may be used safely with other locking list functions. The tail item is
772  *      returned or %NULL if the list is empty.
773  */
774 static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
775 {
776         unsigned long flags;
777         struct sk_buff *result;
778 
779         spin_lock_irqsave(&list->lock, flags);
780         result = __skb_dequeue_tail(list);
781         spin_unlock_irqrestore(&list->lock, flags);
782         return result;
783 }
784 
785 static inline int skb_is_nonlinear(const struct sk_buff *skb)
786 {
787         return skb->data_len;
788 }
789 
790 static inline unsigned int skb_headlen(const struct sk_buff *skb)
791 {
792         return skb->len - skb->data_len;
793 }
794 
795 static inline int skb_pagelen(const struct sk_buff *skb)
796 {
797         int i, len = 0;
798 
799         for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
800                 len += skb_shinfo(skb)->frags[i].size;
801         return len + skb_headlen(skb);
802 }
803 
804 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
805 {
806         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
807         frag->page = page;
808         frag->page_offset = off;
809         frag->size = size;
810         skb_shinfo(skb)->nr_frags = i+1;
811 }
812 
813 #define SKB_PAGE_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->nr_frags)
814 #define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->frag_list)
815 #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
816 
817 /*
818  *      Add data to an sk_buff
819  */
820 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
821 {
822         unsigned char *tmp = skb->tail;
823         SKB_LINEAR_ASSERT(skb);
824         skb->tail += len;
825         skb->len  += len;
826         return tmp;
827 }
828 
829 /**
830  *      skb_put - add data to a buffer
831  *      @skb: buffer to use
832  *      @len: amount of data to add
833  *
834  *      This function extends the used data area of the buffer. If this would
835  *      exceed the total buffer size the kernel will panic. A pointer to the
836  *      first byte of the extra data is returned.
837  */
838 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
839 {
840         unsigned char *tmp = skb->tail;
841         SKB_LINEAR_ASSERT(skb);
842         skb->tail += len;
843         skb->len  += len;
844         if (unlikely(skb->tail > skb->end))
845                 skb_over_panic(skb, len, current_text_addr());
846         return tmp;
847 }
848 
849 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
850 {
851         skb->data -= len;
852         skb->len  += len;
853         return skb->data;
854 }
855 
856 /**
857  *      skb_push - add data to the start of a buffer
858  *      @skb: buffer to use
859  *      @len: amount of data to add
860  *
861  *      This function extends the used data area of the buffer at the buffer
862  *      start. If this would exceed the total buffer headroom the kernel will
863  *      panic. A pointer to the first byte of the extra data is returned.
864  */
865 static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
866 {
867         skb->data -= len;
868         skb->len  += len;
869         if (unlikely(skb->data < skb->head))
870                 skb_under_panic(skb, len, current_text_addr());
871         return skb->data;
872 }
873 
874 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
875 {
876         skb->len -= len;
877         BUG_ON(skb->len < skb->data_len);
878         return skb->data += len;
879 }
880 
881 /**
882  *      skb_pull - remove data from the start of a buffer
883  *      @skb: buffer to use
884  *      @len: amount of data to remove
885  *
886  *      This function removes data from the start of a buffer, returning
887  *      the memory to the headroom. A pointer to the next data in the buffer
888  *      is returned. Once the data has been pulled future pushes will overwrite
889  *      the old data.
890  */
891 static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
892 {
893         return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
894 }
895 
896 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
897 
898 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
899 {
900         if (len > skb_headlen(skb) &&
901             !__pskb_pull_tail(skb, len-skb_headlen(skb)))
902                 return NULL;
903         skb->len -= len;
904         return skb->data += len;
905 }
906 
907 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
908 {
909         return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
910 }
911 
912 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
913 {
914         if (likely(len <= skb_headlen(skb)))
915                 return 1;
916         if (unlikely(len > skb->len))
917                 return 0;
918         return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
919 }
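
/*
 * Editor's sketch (illustration only): before reading a header through
 * skb->data, make sure that many bytes are linear.  Assumes struct
 * iphdr from <linux/ip.h>; the function name is invented.
 */
static inline int example_check_iphdr(struct sk_buff *skb)
{
        struct iphdr *iph;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                return -EINVAL;         /* truncated packet */
        iph = (struct iphdr *)skb->data;
        return iph->ihl >= 5 ? 0 : -EINVAL;
}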
920 
921 /**
922  *      skb_headroom - bytes at buffer head
923  *      @skb: buffer to check
924  *
925  *      Return the number of bytes of free space at the head of an &sk_buff.
926  */
927 static inline int skb_headroom(const struct sk_buff *skb)
928 {
929         return skb->data - skb->head;
930 }
931 
932 /**
933  *      skb_tailroom - bytes at buffer end
934  *      @skb: buffer to check
935  *
936  *      Return the number of bytes of free space at the tail of an sk_buff
937  */
938 static inline int skb_tailroom(const struct sk_buff *skb)
939 {
940         return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
941 }
942 
943 /**
944  *      skb_reserve - adjust headroom
945  *      @skb: buffer to alter
946  *      @len: bytes to move
947  *
948  *      Increase the headroom of an empty &sk_buff by reducing the tail
949  *      room. This is only allowed for an empty buffer.
950  */
951 static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
952 {
953         skb->data += len;
954         skb->tail += len;
955 }
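
/*
 * Editor's sketch (illustration only; names and sizes are invented,
 * memset is assumed from <linux/string.h>): the canonical build order
 * is reserve headroom on the empty buffer, skb_put() the payload,
 * then skb_push() the header in front of it.
 */
#define EXAMPLE_HDR_LEN         14
#define EXAMPLE_PAYLOAD_LEN     100

static inline struct sk_buff *example_build(void)
{
        struct sk_buff *skb = alloc_skb(EXAMPLE_HDR_LEN + EXAMPLE_PAYLOAD_LEN,
                                        GFP_ATOMIC);

        if (!skb)
                return NULL;
        skb_reserve(skb, EXAMPLE_HDR_LEN);      /* headroom for the header */
        memset(skb_put(skb, EXAMPLE_PAYLOAD_LEN), 0, EXAMPLE_PAYLOAD_LEN);
        memset(skb_push(skb, EXAMPLE_HDR_LEN), 0, EXAMPLE_HDR_LEN);
        return skb;
}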
956 
957 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
958 
959 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
960 {
961         if (!skb->data_len) {
962                 skb->len  = len;
963                 skb->tail = skb->data + len;
964         } else
965                 ___pskb_trim(skb, len, 0);
966 }
967 
968 /**
969  *      skb_trim - remove end from a buffer
970  *      @skb: buffer to alter
971  *      @len: new length
972  *
973  *      Cut the length of a buffer down by removing data from the tail. If
974  *      the buffer is already under the length specified it is not modified.
975  */
976 static inline void skb_trim(struct sk_buff *skb, unsigned int len)
977 {
978         if (skb->len > len)
979                 __skb_trim(skb, len);
980 }
981 
982 
983 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
984 {
985         if (!skb->data_len) {
986                 skb->len  = len;
987                 skb->tail = skb->data+len;
988                 return 0;
989         }
990         return ___pskb_trim(skb, len, 1);
991 }
992 
993 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
994 {
995         return (len < skb->len) ? __pskb_trim(skb, len) : 0;
996 }
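
/*
 * Editor's sketch (illustration only; the name is invented): cut
 * trailing link-layer padding down to the length reported by a
 * protocol header.  pskb_trim() copes with paged data and can fail,
 * so the return value must be checked.
 */
static inline int example_strip_padding(struct sk_buff *skb,
                                        unsigned int real_len)
{
        return pskb_trim(skb, real_len);        /* 0, or negative on failure */
}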
997 
998 /**
999  *      skb_orphan - orphan a buffer
1000  *      @skb: buffer to orphan
1001  *
1002  *      If a buffer currently has an owner then we call the owner's
1003  *      destructor function and make the @skb unowned. The buffer continues
1004  *      to exist but is no longer charged to its former owner.
1005  */
1006 static inline void skb_orphan(struct sk_buff *skb)
1007 {
1008         if (skb->destructor)
1009                 skb->destructor(skb);
1010         skb->destructor = NULL;
1011         skb->sk         = NULL;
1012 }
1013 
1014 /**
1015  *      skb_queue_purge - empty a list
1016  *      @list: list to empty
1017  *
1018  *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1019  *      the list and one reference dropped. This function takes the list
1020  *      lock and is atomic with respect to other list locking functions.
1021  */
1022 static inline void skb_queue_purge(struct sk_buff_head *list)
1023 {
1024         struct sk_buff *skb;
1025         while ((skb = skb_dequeue(list)) != NULL)
1026                 kfree_skb(skb);
1027 }
1028 
1029 /**
1030  *      __skb_queue_purge - empty a list
1031  *      @list: list to empty
1032  *
1033  *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1034  *      the list and one reference dropped. This function does not take the
1035  *      list lock and the caller must hold the relevant locks to use it.
1036  */
1037 static inline void __skb_queue_purge(struct sk_buff_head *list)
1038 {
1039         struct sk_buff *skb;
1040         while ((skb = __skb_dequeue(list)) != NULL)
1041                 kfree_skb(skb);
1042 }
1043 
1044 /**
1045  *      __dev_alloc_skb - allocate an skbuff for receiving
1046  *      @length: length to allocate
1047  *      @gfp_mask: get_free_pages mask, passed to alloc_skb
1048  *
1049  *      Allocate a new &sk_buff and assign it a usage count of one. The
1050  *      buffer has unspecified headroom built in. Users should allocate
1051  *      the headroom they think they need without accounting for the
1052  *      built in space. The built in space is used for optimisations.
1053  *
1054  *      %NULL is returned if there is no free memory.
1055  */
1056 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1057                                               int gfp_mask)
1058 {
1059         struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
1060         if (likely(skb))
1061                 skb_reserve(skb, 16);
1062         return skb;
1063 }
1064 
1065 /**
1066  *      dev_alloc_skb - allocate an skbuff for receiving
1067  *      @length: length to allocate
1068  *
1069  *      Allocate a new &sk_buff and assign it a usage count of one. The
1070  *      buffer has unspecified headroom built in. Users should allocate
1071  *      the headroom they think they need without accounting for the
1072  *      built in space. The built in space is used for optimisations.
1073  *
1074  *      %NULL is returned if there is no free memory. Although this function
1075  *      allocates memory it can be called from an interrupt.
1076  */
1077 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
1078 {
1079         return __dev_alloc_skb(length, GFP_ATOMIC);
1080 }
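
/*
 * Editor's sketch (illustration only; names are invented, and memcpy/
 * netif_rx are assumed from <linux/string.h> and <linux/netdevice.h>):
 * a typical interrupt-time receive path copies the hardware buffer
 * into a freshly allocated skb.
 */
static inline int example_rx_copy(unsigned char *hw_buf, unsigned int len)
{
        struct sk_buff *skb = dev_alloc_skb(len);

        if (!skb)
                return -ENOMEM;         /* drop: no memory for the packet */
        memcpy(skb_put(skb, len), hw_buf, len);
        return netif_rx(skb);           /* hand the packet to the stack */
}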
1081 
1082 /**
1083  *      skb_cow - copy header of skb when it is required
1084  *      @skb: buffer to cow
1085  *      @headroom: needed headroom
1086  *
1087  *      If the skb passed lacks sufficient headroom or its data part
1088  *      is shared, data is reallocated. If reallocation fails, an error
1089  *      is returned and original skb is not changed.
1090  *
1091  *      The result is an skb with a writable area from skb->head to
1092  *      skb->tail, and at least @headroom bytes of space at the head.
1093  */
1094 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1095 {
1096         int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
1097 
1098         if (delta < 0)
1099                 delta = 0;
1100 
1101         if (delta || skb_cloned(skb))
1102                 return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
1103         return 0;
1104 }
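
/*
 * Editor's sketch (illustration only; the name is invented and ETH_HLEN
 * is assumed from <linux/if_ether.h>): before rewriting a header in
 * place, make it private and guarantee the needed headroom.
 */
static inline int example_make_writable(struct sk_buff *skb)
{
        if (skb_cow(skb, ETH_HLEN))
                return -ENOMEM;         /* skb left unchanged on failure */
        /* ... header bytes may now be modified in place ... */
        return 0;
}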
1105 
1106 /**
1107  *      skb_padto       - pad an skbuff up to a minimal size
1108  *      @skb: buffer to pad
1109  *      @len: minimal length
1110  *
1111  *      Pads up a buffer to ensure the trailing bytes exist and are
1112  *      blanked. If the buffer already contains sufficient data it
1113  *      is untouched. Returns the buffer, which may be a replacement
1114  *      for the original, or NULL for out of memory - in which case
1115  *      the original buffer is still freed.
1116  */
1117  
1118 static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
1119 {
1120         unsigned int size = skb->len;
1121         if (likely(size >= len))
1122                 return skb;
1123         return skb_pad(skb, len-size);
1124 }
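
/*
 * Editor's sketch (illustration only; the name is invented): a transmit
 * path that must emit at least a 60-byte frame (ETH_ZLEN) pads before
 * handing the buffer to the hardware.
 */
static inline int example_tx_pad(struct sk_buff *skb)
{
        skb = skb_padto(skb, 60);
        if (!skb)
                return -ENOMEM;         /* the original was already freed */
        /* ... queue skb to the device here ... */
        kfree_skb(skb);                 /* placeholder for the real hand-off */
        return 0;
}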
1125 
1126 /**
1127  *      skb_linearize - convert a paged skb to a linear one
1128  *      @skb: buffer to linearize
1129  *      @gfp: allocation mode
1130  *
1131  *      If there is no free memory -ENOMEM is returned, otherwise zero
1132  *      is returned and the old skb data released.
1133  */
1134 extern int __skb_linearize(struct sk_buff *skb, int gfp);
1135 static inline int __deprecated skb_linearize(struct sk_buff *skb, int gfp)
1136 {
1137         return __skb_linearize(skb, gfp);
1138 }
1139 
1140 static inline void *kmap_skb_frag(const skb_frag_t *frag)
1141 {
1142 #ifdef CONFIG_HIGHMEM
1143         BUG_ON(in_irq());
1144 
1145         local_bh_disable();
1146 #endif
1147         return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
1148 }
1149 
1150 static inline void kunmap_skb_frag(void *vaddr)
1151 {
1152         kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
1153 #ifdef CONFIG_HIGHMEM
1154         local_bh_enable();
1155 #endif
1156 }
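
/*
 * Editor's sketch (illustration only; the name is invented and memcpy
 * is assumed from <linux/string.h>): map one page fragment, copy its
 * bytes out, unmap.  With highmem this runs with BHs disabled, so the
 * copy should be short.
 */
static inline void example_copy_frag(const skb_frag_t *frag, void *to)
{
        void *vaddr = kmap_skb_frag(frag);

        memcpy(to, vaddr + frag->page_offset, frag->size);
        kunmap_skb_frag(vaddr);
}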
1157 
1158 #define skb_queue_walk(queue, skb) \
1159                 for (skb = (queue)->next, prefetch(skb->next);  \
1160                      (skb != (struct sk_buff *)(queue));        \
1161                      skb = skb->next, prefetch(skb->next))
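
/*
 * Editor's sketch (illustration only; the name is invented): walk a
 * queue without unlinking anything.  The queue lock must be held for
 * the whole walk, since skb_queue_walk() takes none itself.
 */
static inline unsigned int example_queue_bytes(struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned int bytes = 0;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        skb_queue_walk(q, skb)
                bytes += skb->len;
        spin_unlock_irqrestore(&q->lock, flags);
        return bytes;
}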
1162 
1163 
1164 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
1165                                          int noblock, int *err);
1166 extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
1167                                      struct poll_table_struct *wait);
1168 extern int             skb_copy_datagram(const struct sk_buff *from,
1169                                          int offset, char *to, int size);
1170 extern int             skb_copy_datagram_iovec(const struct sk_buff *from,
1171                                                int offset, struct iovec *to,
1172                                                int size);
1173 extern int             skb_copy_and_csum_datagram(const struct sk_buff *skb,
1174                                                   int offset, u8 *to, int len,
1175                                                   unsigned int *csump);
1176 extern int             skb_copy_and_csum_datagram_iovec(const
1177                                                         struct sk_buff *skb,
1178                                                         int hlen,
1179                                                         struct iovec *iov);
1180 extern void            skb_free_datagram(struct sock *sk, struct sk_buff *skb);
1181 extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
1182                                     int len, unsigned int csum);
1183 extern int             skb_copy_bits(const struct sk_buff *skb, int offset,
1184                                      void *to, int len);
1185 extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
1186                                               int offset, u8 *to, int len,
1187                                               unsigned int csum);
1188 extern void            skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1189 
1190 extern void skb_init(void);
1191 extern void skb_add_mtu(int mtu);
1192 
1193 #ifdef CONFIG_NETFILTER
1194 static inline void nf_conntrack_put(struct nf_ct_info *nfct)
1195 {
1196         if (nfct && atomic_dec_and_test(&nfct->master->use))
1197                 nfct->master->destroy(nfct->master);
1198 }
1199 static inline void nf_conntrack_get(struct nf_ct_info *nfct)
1200 {
1201         if (nfct)
1202                 atomic_inc(&nfct->master->use);
1203 }
1204 
1205 #ifdef CONFIG_BRIDGE_NETFILTER
1206 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
1207 {
1208         if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
1209                 kfree(nf_bridge);
1210 }
1211 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
1212 {
1213         if (nf_bridge)
1214                 atomic_inc(&nf_bridge->use);
1215 }
1216 #endif
1217 
1218 #endif
1219 
1220 #endif  /* __KERNEL__ */
1221 #endif  /* _LINUX_SKBUFF_H */
1222 
