~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/net/dst.h

Version: ~ [ linux-5.6-rc1 ] ~ [ linux-5.5.2 ] ~ [ linux-5.4.17 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.102 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.170 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.213 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.213 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.81 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * net/dst.h    Protocol independent destination cache definitions.
  3  *
  4  * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  5  *
  6  */
  7 
  8 #ifndef _NET_DST_H
  9 #define _NET_DST_H
 10 
 11 #include <net/dst_ops.h>
 12 #include <linux/netdevice.h>
 13 #include <linux/rtnetlink.h>
 14 #include <linux/rcupdate.h>
 15 #include <linux/bug.h>
 16 #include <linux/jiffies.h>
 17 #include <net/neighbour.h>
 18 #include <asm/processor.h>
 19 
/* Garbage-collection pacing bounds for the dst cache, in jiffies:
 * minimum interval, linear backoff increment, and upper clamp.
 */
#define DST_GC_MIN      (HZ/10)
#define DST_GC_INC      (HZ/2)
#define DST_GC_MAX      (120*HZ)

/* Each dst_entry has reference count and sits in some parent list(s).
 * When it is removed from parent list, it is "freed" (dst_free).
 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to gc list and garbage collector periodically checks the refcnt.
 */
 30 
 31 struct sk_buff;
 32 
/* Protocol-independent destination cache entry.
 *
 * Field order is deliberate: the hot read-mostly fields (ops, input,
 * output) sit at the front, while the frequently-written __refcnt is
 * pushed onto its own 64-byte-aligned cache line via
 * __pad_to_align_refcnt (enforced by the BUILD_BUG_ON in dst_hold()).
 */
struct dst_entry {
        struct rcu_head         rcu_head;       /* deferred free, see dst_rcu_free() */
        struct dst_entry        *child;
        struct net_device       *dev;
        struct  dst_ops         *ops;           /* per-protocol operations table */
        unsigned long           _metrics;       /* metrics array ptr | DST_METRICS_* low bits */
        unsigned long           expires;        /* jiffies; 0 == no expiry (see dst_set_expires()) */
        struct dst_entry        *path;
        struct dst_entry        *from;
#ifdef CONFIG_XFRM
        struct xfrm_state       *xfrm;
#else
        void                    *__pad1;        /* keep layout stable when CONFIG_XFRM is off */
#endif
        int                     (*input)(struct sk_buff *);
        int                     (*output)(struct sock *sk, struct sk_buff *skb);

        unsigned short          flags;
#define DST_HOST                0x0001
#define DST_NOXFRM              0x0002
#define DST_NOPOLICY            0x0004
#define DST_NOHASH              0x0008
#define DST_NOCACHE             0x0010
#define DST_NOCOUNT             0x0020
#define DST_FAKE_RTABLE         0x0040
#define DST_XFRM_TUNNEL         0x0080
#define DST_XFRM_QUEUE          0x0100

        /* set by dst_confirm(), consumed by dst_neigh_output() */
        unsigned short          pending_confirm;

        short                   error;

        /* A non-zero value of dst->obsolete forces by-hand validation
         * of the route entry.  Positive values are set by the generic
         * dst layer to indicate that the entry has been forcefully
         * destroyed.
         *
         * Negative values are used by the implementation layer code to
         * force invocation of the dst_ops->check() method.
         */
        short                   obsolete;
#define DST_OBSOLETE_NONE       0
#define DST_OBSOLETE_DEAD       2
#define DST_OBSOLETE_FORCE_CHK  -1
#define DST_OBSOLETE_KILL       -2
        unsigned short          header_len;     /* more space at head required */
        unsigned short          trailer_len;    /* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
        __u32                   tclassid;
#else
        __u32                   __pad2;
#endif

        /*
         * Align __refcnt to a 64 bytes alignment
         * (L1_CACHE_SIZE would be too much)
         */
#ifdef CONFIG_64BIT
        long                    __pad_to_align_refcnt[2];
#endif
        /*
         * __refcnt wants to be on a different cache line from
         * input/output/ops or performance tanks badly
         */
        atomic_t                __refcnt;       /* client references    */
        int                     __use;
        unsigned long           lastuse;
        union {                                 /* per-protocol chaining; one slot shared */
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
                struct rt6_info         *rt6_next;
                struct dn_route __rcu   *dn_next;
        };
};
107 
void *dst_alloc_metrics(gfp_t flags);
void dst_free_metrics(void *metrics);
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

/* dst->_metrics packs a pointer to the u32 metrics array together with
 * flag bits stored in its low-order bits; the array must therefore be
 * aligned to at least DST_METRICS_ALIGNMENT so those bits are free.
 */
#define DST_METRICS_READ_ONLY           0x1UL   /* shared block: copy before writing */
#define DST_METRICS_FORCE_OVERWRITE     0x2UL
#define DST_METRICS_REFCOUNTED          0x4UL
#define DST_METRICS_FLAGS               0x7UL   /* mask covering all flag bits */
#define DST_METRICS_ALIGNMENT           0x8UL
#define __DST_METRICS_PTR(Y)    \
        ((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)      __DST_METRICS_PTR((X)->_metrics)

/* Refcounted metrics block; __aligned keeps the low address bits zero
 * so they can carry the DST_METRICS_* flags above.
 */
struct dst_metrics {
        u32             metrics[RTAX_MAX];
        atomic_t        refcnt;
} __aligned(DST_METRICS_ALIGNMENT);
extern const struct dst_metrics dst_default_metrics;
126 
127 static inline bool dst_metrics_read_only(const struct dst_entry *dst)
128 {
129         return dst->_metrics & DST_METRICS_READ_ONLY;
130 }
131 
132 static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
133 {
134         dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
135 }
136 
137 void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
138 
139 static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
140 {
141         unsigned long val = dst->_metrics;
142         if (!(val & DST_METRICS_READ_ONLY))
143                 __dst_destroy_metrics_generic(dst, val);
144 }
145 
146 static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
147 {
148         unsigned long p = dst->_metrics;
149 
150         BUG_ON(!p);
151 
152         if (p & DST_METRICS_READ_ONLY)
153                 return dst->ops->cow_metrics(dst, p);
154         return __DST_METRICS_PTR(p);
155 }
156 
157 /* This may only be invoked before the entry has reached global
158  * visibility.
159  */
160 static inline void dst_init_metrics(struct dst_entry *dst,
161                                     const u32 *src_metrics,
162                                     bool read_only)
163 {
164         dst->_metrics = ((unsigned long) src_metrics) |
165                 (read_only ? DST_METRICS_READ_ONLY : 0);
166 }
167 
168 static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
169 {
170         u32 *dst_metrics = dst_metrics_write_ptr(dest);
171 
172         if (dst_metrics) {
173                 u32 *src_metrics = DST_METRICS_PTR(src);
174 
175                 memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
176         }
177 }
178 
/* Raw pointer to the metrics array; may reference a shared read-only
 * block — use dst_metrics_write_ptr() before writing.
 */
static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
        return DST_METRICS_PTR(dst);
}
183 
184 static inline u32
185 dst_metric_raw(const struct dst_entry *dst, const int metric)
186 {
187         u32 *p = DST_METRICS_PTR(dst);
188 
189         return p[metric-1];
190 }
191 
192 static inline u32
193 dst_metric(const struct dst_entry *dst, const int metric)
194 {
195         WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
196                      metric == RTAX_ADVMSS ||
197                      metric == RTAX_MTU);
198         return dst_metric_raw(dst, metric);
199 }
200 
201 static inline u32
202 dst_metric_advmss(const struct dst_entry *dst)
203 {
204         u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);
205 
206         if (!advmss)
207                 advmss = dst->ops->default_advmss(dst);
208 
209         return advmss;
210 }
211 
212 static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
213 {
214         u32 *p = dst_metrics_write_ptr(dst);
215 
216         if (p)
217                 p[metric-1] = val;
218 }
219 
220 static inline u32
221 dst_feature(const struct dst_entry *dst, u32 feature)
222 {
223         return dst_metric(dst, RTAX_FEATURES) & feature;
224 }
225 
/* Path MTU for this destination, as computed by the protocol's mtu op. */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
        return dst->ops->mtu(dst);
}
230 
231 /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
232 static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
233 {
234         return msecs_to_jiffies(dst_metric(dst, metric));
235 }
236 
237 static inline u32
238 dst_allfrag(const struct dst_entry *dst)
239 {
240         int ret = dst_feature(dst,  RTAX_FEATURE_ALLFRAG);
241         return ret;
242 }
243 
244 static inline int
245 dst_metric_locked(const struct dst_entry *dst, int metric)
246 {
247         return dst_metric(dst, RTAX_LOCK) & (1<<metric);
248 }
249 
/* Take a client reference on the entry.  The build-time assertion
 * enforces the 64-byte alignment of __refcnt so that refcount writes
 * stay off the cache line holding the read-mostly fields.
 */
static inline void dst_hold(struct dst_entry *dst)
{
        /*
         * If your kernel compilation stops here, please check
         * __pad_to_align_refcnt declaration in struct dst_entry
         */
        BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
        atomic_inc(&dst->__refcnt);
}
259 
260 static inline void dst_use(struct dst_entry *dst, unsigned long time)
261 {
262         dst_hold(dst);
263         dst->__use++;
264         dst->lastuse = time;
265 }
266 
267 static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
268 {
269         dst->__use++;
270         dst->lastuse = time;
271 }
272 
273 static inline struct dst_entry *dst_clone(struct dst_entry *dst)
274 {
275         if (dst)
276                 atomic_inc(&dst->__refcnt);
277         return dst;
278 }
279 
280 void dst_release(struct dst_entry *dst);
281 
282 static inline void refdst_drop(unsigned long refdst)
283 {
284         if (!(refdst & SKB_DST_NOREF))
285                 dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
286 }
287 
288 /**
289  * skb_dst_drop - drops skb dst
290  * @skb: buffer
291  *
292  * Drops dst reference count if a reference was taken.
293  */
294 static inline void skb_dst_drop(struct sk_buff *skb)
295 {
296         if (skb->_skb_refdst) {
297                 refdst_drop(skb->_skb_refdst);
298                 skb->_skb_refdst = 0UL;
299         }
300 }
301 
302 static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
303 {
304         nskb->_skb_refdst = oskb->_skb_refdst;
305         if (!(nskb->_skb_refdst & SKB_DST_NOREF))
306                 dst_clone(skb_dst(nskb));
307 }
308 
309 /**
310  * skb_dst_force - makes sure skb dst is refcounted
311  * @skb: buffer
312  *
313  * If dst is not yet refcounted, let's do it
314  */
315 static inline void skb_dst_force(struct sk_buff *skb)
316 {
317         if (skb_dst_is_noref(skb)) {
318                 WARN_ON(!rcu_read_lock_held());
319                 skb->_skb_refdst &= ~SKB_DST_NOREF;
320                 dst_clone(skb_dst(skb));
321         }
322 }
323 
324 
/**
 *      __skb_tunnel_rx - prepare skb for rx reinsert
 *      @skb: buffer
 *      @dev: tunnel device
 *      @net: netns for packet i/o
 *
 *      After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 *      so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
                                   struct net *net)
{
        skb->dev = dev;

        /*
         * Clear hash so that we can recalculate the hash for the
         * encapsulated packet, unless we have already determined the hash
         * over the L4 4-tuple.
         */
        skb_clear_hash_if_not_l4(skb);
        skb_set_queue_mapping(skb, 0);
        /* scrub netns-local state when crossing into a different netns */
        skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
348 
349 /**
350  *      skb_tunnel_rx - prepare skb for rx reinsert
351  *      @skb: buffer
352  *      @dev: tunnel device
353  *      @net: netns for packet i/o
354  *
355  *      After decapsulation, packet is going to re-enter (netif_rx()) our stack,
356  *      so make some cleanups, and perform accounting.
357  *      Note: this accounting is not SMP safe.
358  */
359 static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
360                                  struct net *net)
361 {
362         /* TODO : stats should be SMP safe */
363         dev->stats.rx_packets++;
364         dev->stats.rx_bytes += skb->len;
365         __skb_tunnel_rx(skb, dev, net);
366 }
367 
368 /* Children define the path of the packet through the
369  * Linux networking.  Thus, destinations are stackable.
370  */
371 
372 static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
373 {
374         struct dst_entry *child = dst_clone(skb_dst(skb)->child);
375 
376         skb_dst_drop(skb);
377         return child;
378 }
379 
int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
/* Discard helper for skbs without an explicit socket argument:
 * forwards to dst_discard_sk() with the skb's owning socket.
 */
static inline int dst_discard(struct sk_buff *skb)
{
        return dst_discard_sk(skb->sk, skb);
}
385 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
386                 int initial_obsolete, unsigned short flags);
387 void __dst_free(struct dst_entry *dst);
388 struct dst_entry *dst_destroy(struct dst_entry *dst);
389 
/* Free an entry that has been removed from its parent list.
 * Entries already marked dead (obsolete > 0) are skipped.  With no
 * outstanding references the entry is destroyed immediately;
 * dst_destroy() may hand back another entry that still needs freeing.
 * Anything still referenced goes to the GC via __dst_free().
 */
static inline void dst_free(struct dst_entry *dst)
{
        if (dst->obsolete > 0)
                return;
        if (!atomic_read(&dst->__refcnt)) {
                dst = dst_destroy(dst);
                if (!dst)
                        return;
        }
        __dst_free(dst);
}
401 
402 static inline void dst_rcu_free(struct rcu_head *head)
403 {
404         struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
405         dst_free(dst);
406 }
407 
/* Request neighbour-reachability confirmation on the next transmit;
 * the flag is consumed by dst_neigh_output().
 */
static inline void dst_confirm(struct dst_entry *dst)
{
        dst->pending_confirm = 1;
}
412 
/* Transmit skb via neighbour n, first confirming reachability if the
 * dst has a confirmation pending.  Takes the cached-hardware-header
 * fast path when the neighbour is connected and a header is cached.
 */
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
                                   struct sk_buff *skb)
{
        const struct hh_cache *hh;

        if (dst->pending_confirm) {
                unsigned long now = jiffies;

                dst->pending_confirm = 0;
                /* avoid dirtying neighbour */
                if (n->confirmed != now)
                        n->confirmed = now;
        }

        hh = &n->hh;
        if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
                return neigh_hh_output(hh, skb);
        else
                return n->output(n, skb);
}
433 
434 static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
435 {
436         struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
437         return IS_ERR(n) ? NULL : n;
438 }
439 
440 static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
441                                                      struct sk_buff *skb)
442 {
443         struct neighbour *n =  dst->ops->neigh_lookup(dst, skb, NULL);
444         return IS_ERR(n) ? NULL : n;
445 }
446 
447 static inline void dst_link_failure(struct sk_buff *skb)
448 {
449         struct dst_entry *dst = skb_dst(skb);
450         if (dst && dst->ops && dst->ops->link_failure)
451                 dst->ops->link_failure(skb);
452 }
453 
454 static inline void dst_set_expires(struct dst_entry *dst, int timeout)
455 {
456         unsigned long expires = jiffies + timeout;
457 
458         if (expires == 0)
459                 expires = 1;
460 
461         if (dst->expires == 0 || time_before(expires, dst->expires))
462                 dst->expires = expires;
463 }
464 
/* Output packet to network from transport.  */
static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
{
        return skb_dst(skb)->output(sk, skb);
}
/* As above, using the socket already attached to the skb. */
static inline int dst_output(struct sk_buff *skb)
{
        return dst_output_sk(skb->sk, skb);
}

/* Input packet from network to transport.  */
static inline int dst_input(struct sk_buff *skb)
{
        return skb_dst(skb)->input(skb);
}
480 
481 static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
482 {
483         if (dst->obsolete)
484                 dst = dst->ops->check(dst, cookie);
485         return dst;
486 }
487 
488 void dst_init(void);
489 
/* Flags for xfrm_lookup flags argument. */
enum {
        XFRM_LOOKUP_ICMP = 1 << 0,
        XFRM_LOOKUP_QUEUE = 1 << 1,
        XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
/* IPsec disabled: transformation lookups degenerate to identity
 * operations that simply hand back the original route.
 */
static inline struct dst_entry *xfrm_lookup(struct net *net,
                                            struct dst_entry *dst_orig,
                                            const struct flowi *fl, struct sock *sk,
                                            int flags)
{
        return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
                                                  struct dst_entry *dst_orig,
                                                  const struct flowi *fl,
                                                  struct sock *sk,
                                                  int flags)
{
        return dst_orig;
}

/* No xfrm field exists in struct dst_entry without CONFIG_XFRM. */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
        return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                              const struct flowi *fl, struct sock *sk,
                              int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
                                    const struct flowi *fl, struct sock *sk,
                                    int flags);

/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
        return dst->xfrm;
}
#endif
536 
537 #endif /* _NET_DST_H */
538 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp