TOMOYO Linux Cross Reference
Linux/include/net/sch_generic.h

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef __NET_SCHED_GENERIC_H
  3 #define __NET_SCHED_GENERIC_H
  4 
  5 #include <linux/netdevice.h>
  6 #include <linux/types.h>
  7 #include <linux/rcupdate.h>
  8 #include <linux/pkt_sched.h>
  9 #include <linux/pkt_cls.h>
 10 #include <linux/percpu.h>
 11 #include <linux/dynamic_queue_limits.h>
 12 #include <linux/list.h>
 13 #include <linux/refcount.h>
 14 #include <linux/workqueue.h>
 15 #include <linux/mutex.h>
 16 #include <linux/hashtable.h>
 17 #include <net/gen_stats.h>
 18 #include <net/rtnetlink.h>
 19 #include <net/flow_offload.h>
 20 
 21 struct Qdisc_ops;
 22 struct qdisc_walker;
 23 struct tcf_walker;
 24 struct module;
 25 struct bpf_flow_keys;
 26 
 27 typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
 28                                     enum tc_setup_type type, void *type_data);
 29 
 30 struct qdisc_rate_table {
 31         struct tc_ratespec rate;
 32         u32             data[256];
 33         struct qdisc_rate_table *next;
 34         int             refcnt;
 35 };
 36 
 37 enum qdisc_state_t {
 38         __QDISC_STATE_SCHED,
 39         __QDISC_STATE_DEACTIVATED,
 40 };
 41 
 42 struct qdisc_size_table {
 43         struct rcu_head         rcu;
 44         struct list_head        list;
 45         struct tc_sizespec      szopts;
 46         int                     refcnt;
 47         u16                     data[];
 48 };
 49 
 50 /* similar to sk_buff_head, but skb->prev pointer is undefined. */
 51 struct qdisc_skb_head {
 52         struct sk_buff  *head;
 53         struct sk_buff  *tail;
 54         __u32           qlen;
 55         spinlock_t      lock;
 56 };
 57 
 58 struct Qdisc {
 59         int                     (*enqueue)(struct sk_buff *skb,
 60                                            struct Qdisc *sch,
 61                                            struct sk_buff **to_free);
 62         struct sk_buff *        (*dequeue)(struct Qdisc *sch);
 63         unsigned int            flags;
 64 #define TCQ_F_BUILTIN           1
 65 #define TCQ_F_INGRESS           2
 66 #define TCQ_F_CAN_BYPASS        4
 67 #define TCQ_F_MQROOT            8
 68 #define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
 69                                       * q->dev_queue: it can test
 70                                       * netif_xmit_frozen_or_stopped() before
 71                                       * dequeueing the next packet.
 72                                       * This is true for MQ/MQPRIO slaves, or a
 73                                       * non-multiqueue device.
 74                                       */
 75 #define TCQ_F_WARN_NONWC        (1 << 16)
 76 #define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
 77 #define TCQ_F_NOPARENT          0x40 /* root of its hierarchy:
 78                                       * qdisc_tree_reduce_backlog() should stop.
 79                                       */
 80 #define TCQ_F_INVISIBLE         0x80 /* invisible by default in dump */
 81 #define TCQ_F_NOLOCK            0x100 /* qdisc does not require locking */
 82 #define TCQ_F_OFFLOADED         0x200 /* qdisc is offloaded to HW */
 83         u32                     limit;
 84         const struct Qdisc_ops  *ops;
 85         struct qdisc_size_table __rcu *stab;
 86         struct hlist_node       hash;
 87         u32                     handle;
 88         u32                     parent;
 89 
 90         struct netdev_queue     *dev_queue;
 91 
 92         struct net_rate_estimator __rcu *rate_est;
 93         struct gnet_stats_basic_cpu __percpu *cpu_bstats;
 94         struct gnet_stats_queue __percpu *cpu_qstats;
 95         int                     padded;
 96         refcount_t              refcnt;
 97 
 98         /*
 99          * For performance's sake on SMP, we put frequently modified fields at the end.
100          */
101         struct sk_buff_head     gso_skb ____cacheline_aligned_in_smp;
102         struct qdisc_skb_head   q;
103         struct gnet_stats_basic_packed bstats;
104         seqcount_t              running;
105         struct gnet_stats_queue qstats;
106         unsigned long           state;
107         struct Qdisc            *next_sched;
108         struct sk_buff_head     skb_bad_txq;
109 
110         spinlock_t              busylock ____cacheline_aligned_in_smp;
111         spinlock_t              seqlock;
112 
113         /* for NOLOCK qdisc, true if there are no enqueued skbs */
114         bool                    empty;
115         struct rcu_head         rcu;
116 };
117 
118 static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
119 {
120         if (qdisc->flags & TCQ_F_BUILTIN)
121                 return;
122         refcount_inc(&qdisc->refcnt);
123 }
124 
125 /* Intended to be used by unlocked users when concurrent qdisc release is
126  * possible.
127  */
128 
129 static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
130 {
131         if (qdisc->flags & TCQ_F_BUILTIN)
132                 return qdisc;
133         if (refcount_inc_not_zero(&qdisc->refcnt))
134                 return qdisc;
135         return NULL;
136 }
137 
138 static inline bool qdisc_is_running(struct Qdisc *qdisc)
139 {
140         if (qdisc->flags & TCQ_F_NOLOCK)
141                 return spin_is_locked(&qdisc->seqlock);
142         return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
143 }
144 
145 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
146 {
147         return q->flags & TCQ_F_CPUSTATS;
148 }
149 
150 static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
151 {
152         if (qdisc_is_percpu_stats(qdisc))
153                 return qdisc->empty;
154         return !qdisc->q.qlen;
155 }
156 
157 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
158 {
159         if (qdisc->flags & TCQ_F_NOLOCK) {
160                 if (!spin_trylock(&qdisc->seqlock))
161                         return false;
162                 qdisc->empty = false;
163         } else if (qdisc_is_running(qdisc)) {
164                 return false;
165         }
166         /* Variant of write_seqcount_begin() telling lockdep a trylock
167          * was attempted.
168          */
169         raw_write_seqcount_begin(&qdisc->running);
170         seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
171         return true;
172 }
173 
174 static inline void qdisc_run_end(struct Qdisc *qdisc)
175 {
176         write_seqcount_end(&qdisc->running);
177         if (qdisc->flags & TCQ_F_NOLOCK)
178                 spin_unlock(&qdisc->seqlock);
179 }
180 
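/* Illustrative usage sketch (not part of the original header): callers are
 * expected to pair qdisc_run_begin() and qdisc_run_end() so that the seqcount
 * (or, for TCQ_F_NOLOCK qdiscs, the seqlock) is always released.  The core
 * helper qdisc_run() in include/net/pkt_sched.h has roughly this shape:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);		-- dequeue and transmit some packets
 *		qdisc_run_end(q);
 *	}
 */
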
181 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
182 {
183         return qdisc->flags & TCQ_F_ONETXQUEUE;
184 }
185 
186 static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
187 {
188 #ifdef CONFIG_BQL
189         /* Non-BQL migrated drivers will return 0, too. */
190         return dql_avail(&txq->dql);
191 #else
192         return 0;
193 #endif
194 }
195 
196 struct Qdisc_class_ops {
197         unsigned int            flags;
198         /* Child qdisc manipulation */
199         struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
200         int                     (*graft)(struct Qdisc *, unsigned long cl,
201                                         struct Qdisc *, struct Qdisc **,
202                                         struct netlink_ext_ack *extack);
203         struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
204         void                    (*qlen_notify)(struct Qdisc *, unsigned long);
205 
206         /* Class manipulation routines */
207         unsigned long           (*find)(struct Qdisc *, u32 classid);
208         int                     (*change)(struct Qdisc *, u32, u32,
209                                         struct nlattr **, unsigned long *,
210                                         struct netlink_ext_ack *);
211         int                     (*delete)(struct Qdisc *, unsigned long);
212         void                    (*walk)(struct Qdisc *, struct qdisc_walker * arg);
213 
214         /* Filter manipulation */
215         struct tcf_block *      (*tcf_block)(struct Qdisc *sch,
216                                              unsigned long arg,
217                                              struct netlink_ext_ack *extack);
218         unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
219                                         u32 classid);
220         void                    (*unbind_tcf)(struct Qdisc *, unsigned long);
221 
222         /* rtnetlink specific */
223         int                     (*dump)(struct Qdisc *, unsigned long,
224                                         struct sk_buff *skb, struct tcmsg*);
225         int                     (*dump_stats)(struct Qdisc *, unsigned long,
226                                         struct gnet_dump *);
227 };
228 
229 /* Qdisc_class_ops flag values */
230 
231 /* Implements API that doesn't require rtnl lock */
232 enum qdisc_class_ops_flags {
233         QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
234 };
235 
236 struct Qdisc_ops {
237         struct Qdisc_ops        *next;
238         const struct Qdisc_class_ops    *cl_ops;
239         char                    id[IFNAMSIZ];
240         int                     priv_size;
241         unsigned int            static_flags;
242 
243         int                     (*enqueue)(struct sk_buff *skb,
244                                            struct Qdisc *sch,
245                                            struct sk_buff **to_free);
246         struct sk_buff *        (*dequeue)(struct Qdisc *);
247         struct sk_buff *        (*peek)(struct Qdisc *);
248 
249         int                     (*init)(struct Qdisc *sch, struct nlattr *arg,
250                                         struct netlink_ext_ack *extack);
251         void                    (*reset)(struct Qdisc *);
252         void                    (*destroy)(struct Qdisc *);
253         int                     (*change)(struct Qdisc *sch,
254                                           struct nlattr *arg,
255                                           struct netlink_ext_ack *extack);
256         void                    (*attach)(struct Qdisc *sch);
257         int                     (*change_tx_queue_len)(struct Qdisc *, unsigned int);
258 
259         int                     (*dump)(struct Qdisc *, struct sk_buff *);
260         int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);
261 
262         void                    (*ingress_block_set)(struct Qdisc *sch,
263                                                      u32 block_index);
264         void                    (*egress_block_set)(struct Qdisc *sch,
265                                                     u32 block_index);
266         u32                     (*ingress_block_get)(struct Qdisc *sch);
267         u32                     (*egress_block_get)(struct Qdisc *sch);
268 
269         struct module           *owner;
270 };
271 
272 
273 struct tcf_result {
274         union {
275                 struct {
276                         unsigned long   class;
277                         u32             classid;
278                 };
279                 const struct tcf_proto *goto_tp;
280 
281                 /* used in the skb_tc_reinsert function */
282                 struct {
283                         bool            ingress;
284                         struct gnet_stats_queue *qstats;
285                 };
286         };
287 };
288 
289 struct tcf_chain;
290 
291 struct tcf_proto_ops {
292         struct list_head        head;
293         char                    kind[IFNAMSIZ];
294 
295         int                     (*classify)(struct sk_buff *,
296                                             const struct tcf_proto *,
297                                             struct tcf_result *);
298         int                     (*init)(struct tcf_proto*);
299         void                    (*destroy)(struct tcf_proto *tp, bool rtnl_held,
300                                            struct netlink_ext_ack *extack);
301 
302         void*                   (*get)(struct tcf_proto*, u32 handle);
303         void                    (*put)(struct tcf_proto *tp, void *f);
304         int                     (*change)(struct net *net, struct sk_buff *,
305                                         struct tcf_proto*, unsigned long,
306                                         u32 handle, struct nlattr **,
307                                         void **, bool, bool,
308                                         struct netlink_ext_ack *);
309         int                     (*delete)(struct tcf_proto *tp, void *arg,
310                                           bool *last, bool rtnl_held,
311                                           struct netlink_ext_ack *);
312         void                    (*walk)(struct tcf_proto *tp,
313                                         struct tcf_walker *arg, bool rtnl_held);
314         int                     (*reoffload)(struct tcf_proto *tp, bool add,
315                                              flow_setup_cb_t *cb, void *cb_priv,
316                                              struct netlink_ext_ack *extack);
317         void                    (*bind_class)(void *, u32, unsigned long);
318         void *                  (*tmplt_create)(struct net *net,
319                                                 struct tcf_chain *chain,
320                                                 struct nlattr **tca,
321                                                 struct netlink_ext_ack *extack);
322         void                    (*tmplt_destroy)(void *tmplt_priv);
323 
324         /* rtnetlink specific */
325         int                     (*dump)(struct net*, struct tcf_proto*, void *,
326                                         struct sk_buff *skb, struct tcmsg*,
327                                         bool);
328         int                     (*tmplt_dump)(struct sk_buff *skb,
329                                               struct net *net,
330                                               void *tmplt_priv);
331 
332         struct module           *owner;
333         int                     flags;
334 };
335 
336 enum tcf_proto_ops_flags {
337         TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
338 };
339 
340 struct tcf_proto {
341         /* Fast access part */
342         struct tcf_proto __rcu  *next;
343         void __rcu              *root;
344 
345         /* called under RCU BH lock */
346         int                     (*classify)(struct sk_buff *,
347                                             const struct tcf_proto *,
348                                             struct tcf_result *);
349         __be16                  protocol;
350 
351         /* All the rest */
352         u32                     prio;
353         void                    *data;
354         const struct tcf_proto_ops      *ops;
355         struct tcf_chain        *chain;
356         /* Lock protects tcf_proto shared state and can be used by unlocked
357          * classifiers to protect their private data.
358          */
359         spinlock_t              lock;
360         bool                    deleting;
361         refcount_t              refcnt;
362         struct rcu_head         rcu;
363         struct hlist_node       destroy_ht_node;
364 };
365 
366 struct qdisc_skb_cb {
367         struct {
368                 unsigned int            pkt_len;
369                 u16                     slave_dev_queue_mapping;
370                 u16                     tc_classid;
371         };
372 #define QDISC_CB_PRIV_LEN 20
373         unsigned char           data[QDISC_CB_PRIV_LEN];
374 };
375 
376 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
377 
378 struct tcf_chain {
379         /* Protects filter_chain. */
380         struct mutex filter_chain_lock;
381         struct tcf_proto __rcu *filter_chain;
382         struct list_head list;
383         struct tcf_block *block;
384         u32 index; /* chain index */
385         unsigned int refcnt;
386         unsigned int action_refcnt;
387         bool explicitly_created;
388         bool flushing;
389         const struct tcf_proto_ops *tmplt_ops;
390         void *tmplt_priv;
391         struct rcu_head rcu;
392 };
393 
394 struct tcf_block {
395         /* Lock protects tcf_block and lifetime-management data of chains
396          * attached to the block (refcnt, action_refcnt, explicitly_created).
397          */
398         struct mutex lock;
399         struct list_head chain_list;
400         u32 index; /* block index for shared blocks */
401         refcount_t refcnt;
402         struct net *net;
403         struct Qdisc *q;
404         struct flow_block flow_block;
405         struct list_head owner_list;
406         bool keep_dst;
407         unsigned int offloadcnt; /* Number of offloaded filters */
408         unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
409         struct {
410                 struct tcf_chain *chain;
411                 struct list_head filter_chain_list;
412         } chain0;
413         struct rcu_head rcu;
414         DECLARE_HASHTABLE(proto_destroy_ht, 7);
415         struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
416 };
417 
418 #ifdef CONFIG_PROVE_LOCKING
419 static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
420 {
421         return lockdep_is_held(&chain->filter_chain_lock);
422 }
423 
424 static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
425 {
426         return lockdep_is_held(&tp->lock);
427 }
428 #else
429 static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
430 {
431         return true;
432 }
433 
434 static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
435 {
436         return true;
437 }
438 #endif /* #ifdef CONFIG_PROVE_LOCKING */
439 
440 #define tcf_chain_dereference(p, chain)                                 \
441         rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
442 
443 #define tcf_proto_dereference(p, tp)                                    \
444         rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
445 
446 static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
447 {
448         if (*flags & TCA_CLS_FLAGS_IN_HW)
449                 return;
450         *flags |= TCA_CLS_FLAGS_IN_HW;
451         block->offloadcnt++;
452 }
453 
454 static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
455 {
456         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
457                 return;
458         *flags &= ~TCA_CLS_FLAGS_IN_HW;
459         block->offloadcnt--;
460 }
461 
462 static inline void
463 tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt,
464                           u32 *flags, bool add)
465 {
466         if (add) {
467                 if (!*cnt)
468                         tcf_block_offload_inc(block, flags);
469                 (*cnt)++;
470         } else {
471                 (*cnt)--;
472                 if (!*cnt)
473                         tcf_block_offload_dec(block, flags);
474         }
475 }
476 
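/* Illustrative sketch (not part of the original header): classifiers that
 * track per-filter hardware offload keep an in_hw_count and let this helper
 * maintain both the counter and the TCA_CLS_FLAGS_IN_HW flag, in the spirit
 * of how cls_flower tracks f->in_hw_count:
 *
 *	tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags, add);
 */
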
477 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
478 {
479         struct qdisc_skb_cb *qcb;
480 
481         BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
482         BUILD_BUG_ON(sizeof(qcb->data) < sz);
483 }
484 
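/* Illustrative sketch (not part of the original header): a qdisc that keeps
 * per-packet state in skb->cb typically wraps struct qdisc_skb_cb and
 * validates the available space in its cb accessor, in the spirit of what
 * sch_netem does.  The struct and function names below are hypothetical:
 *
 *	struct example_skb_cb {
 *		u32 enqueue_time;
 *	};
 *
 *	static struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */
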
485 static inline int qdisc_qlen_cpu(const struct Qdisc *q)
486 {
487         return this_cpu_ptr(q->cpu_qstats)->qlen;
488 }
489 
490 static inline int qdisc_qlen(const struct Qdisc *q)
491 {
492         return q->q.qlen;
493 }
494 
495 static inline int qdisc_qlen_sum(const struct Qdisc *q)
496 {
497         __u32 qlen = q->qstats.qlen;
498         int i;
499 
500         if (qdisc_is_percpu_stats(q)) {
501                 for_each_possible_cpu(i)
502                         qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
503         } else {
504                 qlen += q->q.qlen;
505         }
506 
507         return qlen;
508 }
509 
510 static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
511 {
512         return (struct qdisc_skb_cb *)skb->cb;
513 }
514 
515 static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
516 {
517         return &qdisc->q.lock;
518 }
519 
520 static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
521 {
522         struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
523 
524         return q;
525 }
526 
527 static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
528 {
529         return rcu_dereference_bh(qdisc->dev_queue->qdisc);
530 }
531 
532 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
533 {
534         return qdisc->dev_queue->qdisc_sleeping;
535 }
536 
537 /* The qdisc root lock is a mechanism by which the top level
538  * of a qdisc tree can be locked from any qdisc node in the
539  * forest.  This allows changing the configuration of some
540  * aspect of the qdisc tree while blocking out asynchronous
541  * qdisc access in the packet processing paths.
542  *
543  * It is only legal to do this when the root will not change
544  * on us.  Otherwise we'll potentially lock the wrong qdisc
545  * root.  This is enforced by holding the RTNL semaphore, which
546  * all users of this lock accessor must do.
547  */
548 static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
549 {
550         struct Qdisc *root = qdisc_root(qdisc);
551 
552         ASSERT_RTNL();
553         return qdisc_lock(root);
554 }
555 
556 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
557 {
558         struct Qdisc *root = qdisc_root_sleeping(qdisc);
559 
560         ASSERT_RTNL();
561         return qdisc_lock(root);
562 }
563 
564 static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
565 {
566         struct Qdisc *root = qdisc_root_sleeping(qdisc);
567 
568         ASSERT_RTNL();
569         return &root->running;
570 }
571 
572 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
573 {
574         return qdisc->dev_queue->dev;
575 }
576 
577 static inline void sch_tree_lock(const struct Qdisc *q)
578 {
579         spin_lock_bh(qdisc_root_sleeping_lock(q));
580 }
581 
582 static inline void sch_tree_unlock(const struct Qdisc *q)
583 {
584         spin_unlock_bh(qdisc_root_sleeping_lock(q));
585 }
586 
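/* Illustrative sketch (not part of the original header): qdisc ->change()
 * handlers typically take the tree lock around configuration updates that the
 * datapath may read concurrently, e.g. (with a hypothetical private struct q):
 *
 *	sch_tree_lock(sch);
 *	q->limit = new_limit;
 *	sch_tree_unlock(sch);
 */
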
587 extern struct Qdisc noop_qdisc;
588 extern struct Qdisc_ops noop_qdisc_ops;
589 extern struct Qdisc_ops pfifo_fast_ops;
590 extern struct Qdisc_ops mq_qdisc_ops;
591 extern struct Qdisc_ops noqueue_qdisc_ops;
592 extern const struct Qdisc_ops *default_qdisc_ops;
593 static inline const struct Qdisc_ops *
594 get_default_qdisc_ops(const struct net_device *dev, int ntx)
595 {
596         return ntx < dev->real_num_tx_queues ?
597                         default_qdisc_ops : &pfifo_fast_ops;
598 }
599 
600 struct Qdisc_class_common {
601         u32                     classid;
602         struct hlist_node       hnode;
603 };
604 
605 struct Qdisc_class_hash {
606         struct hlist_head       *hash;
607         unsigned int            hashsize;
608         unsigned int            hashmask;
609         unsigned int            hashelems;
610 };
611 
612 static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
613 {
614         id ^= id >> 8;
615         id ^= id >> 4;
616         return id & mask;
617 }
618 
619 static inline struct Qdisc_class_common *
620 qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
621 {
622         struct Qdisc_class_common *cl;
623         unsigned int h;
624 
625         if (!id)
626                 return NULL;
627 
628         h = qdisc_class_hash(id, hash->hashmask);
629         hlist_for_each_entry(cl, &hash->hash[h], hnode) {
630                 if (cl->classid == id)
631                         return cl;
632         }
633         return NULL;
634 }
635 
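/* Illustrative sketch (not part of the original header): classful qdiscs
 * embed struct Qdisc_class_common in their per-class state and resolve a
 * classid with qdisc_class_find() plus container_of(), much like HTB does.
 * The struct and function names below are hypothetical:
 *
 *	static struct example_class *example_find(u32 classid, struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		return clc ? container_of(clc, struct example_class, common) : NULL;
 *	}
 */
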
636 static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
637 {
638         u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;
639 
640         return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
641 }
642 
643 int qdisc_class_hash_init(struct Qdisc_class_hash *);
644 void qdisc_class_hash_insert(struct Qdisc_class_hash *,
645                              struct Qdisc_class_common *);
646 void qdisc_class_hash_remove(struct Qdisc_class_hash *,
647                              struct Qdisc_class_common *);
648 void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
649 void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
650 
651 int dev_qdisc_change_tx_queue_len(struct net_device *dev);
652 void dev_init_scheduler(struct net_device *dev);
653 void dev_shutdown(struct net_device *dev);
654 void dev_activate(struct net_device *dev);
655 void dev_deactivate(struct net_device *dev);
656 void dev_deactivate_many(struct list_head *head);
657 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
658                               struct Qdisc *qdisc);
659 void qdisc_reset(struct Qdisc *qdisc);
660 void qdisc_put(struct Qdisc *qdisc);
661 void qdisc_put_unlocked(struct Qdisc *qdisc);
662 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
663 #ifdef CONFIG_NET_SCHED
664 int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
665                               void *type_data);
666 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
667                                 struct Qdisc *new, struct Qdisc *old,
668                                 enum tc_setup_type type, void *type_data,
669                                 struct netlink_ext_ack *extack);
670 #else
671 static inline int
672 qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
673                           void *type_data)
674 {
675         q->flags &= ~TCQ_F_OFFLOADED;
676         return 0;
677 }
678 
679 static inline void
680 qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
681                            struct Qdisc *new, struct Qdisc *old,
682                            enum tc_setup_type type, void *type_data,
683                            struct netlink_ext_ack *extack)
684 {
685 }
686 #endif
687 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
688                           const struct Qdisc_ops *ops,
689                           struct netlink_ext_ack *extack);
690 void qdisc_free(struct Qdisc *qdisc);
691 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
692                                 const struct Qdisc_ops *ops, u32 parentid,
693                                 struct netlink_ext_ack *extack);
694 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
695                                const struct qdisc_size_table *stab);
696 int skb_do_redirect(struct sk_buff *);
697 
698 static inline void skb_reset_tc(struct sk_buff *skb)
699 {
700 #ifdef CONFIG_NET_CLS_ACT
701         skb->tc_redirected = 0;
702 #endif
703 }
704 
705 static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
706 {
707 #ifdef CONFIG_NET_CLS_ACT
708         return skb->tc_redirected;
709 #else
710         return false;
711 #endif
712 }
713 
714 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
715 {
716 #ifdef CONFIG_NET_CLS_ACT
717         return skb->tc_at_ingress;
718 #else
719         return false;
720 #endif
721 }
722 
723 static inline bool skb_skip_tc_classify(struct sk_buff *skb)
724 {
725 #ifdef CONFIG_NET_CLS_ACT
726         if (skb->tc_skip_classify) {
727                 skb->tc_skip_classify = 0;
728                 return true;
729         }
730 #endif
731         return false;
732 }
733 
734 /* Reset all TX qdiscs of a device whose queue index is >= i.  */
735 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
736 {
737         struct Qdisc *qdisc;
738 
739         for (; i < dev->num_tx_queues; i++) {
740                 qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
741                 if (qdisc) {
742                         spin_lock_bh(qdisc_lock(qdisc));
743                         qdisc_reset(qdisc);
744                         spin_unlock_bh(qdisc_lock(qdisc));
745                 }
746         }
747 }
748 
749 static inline void qdisc_reset_all_tx(struct net_device *dev)
750 {
751         qdisc_reset_all_tx_gt(dev, 0);
752 }
753 
754 /* Are all TX queues of the device empty?  */
755 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
756 {
757         unsigned int i;
758 
759         rcu_read_lock();
760         for (i = 0; i < dev->num_tx_queues; i++) {
761                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
762                 const struct Qdisc *q = rcu_dereference(txq->qdisc);
763 
764                 if (!qdisc_is_empty(q)) {
765                         rcu_read_unlock();
766                         return false;
767                 }
768         }
769         rcu_read_unlock();
770         return true;
771 }
772 
773 /* Are any of the TX qdiscs changing?  */
774 static inline bool qdisc_tx_changing(const struct net_device *dev)
775 {
776         unsigned int i;
777 
778         for (i = 0; i < dev->num_tx_queues; i++) {
779                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
780                 if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
781                         return true;
782         }
783         return false;
784 }
785 
786 /* Is the device using the noop qdisc on all queues?  */
787 static inline bool qdisc_tx_is_noop(const struct net_device *dev)
788 {
789         unsigned int i;
790 
791         for (i = 0; i < dev->num_tx_queues; i++) {
792                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
793                 if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
794                         return false;
795         }
796         return true;
797 }
798 
799 static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
800 {
801         return qdisc_skb_cb(skb)->pkt_len;
802 }
803 
804 /* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
805 enum net_xmit_qdisc_t {
806         __NET_XMIT_STOLEN = 0x00010000,
807         __NET_XMIT_BYPASS = 0x00020000,
808 };
809 
810 #ifdef CONFIG_NET_CLS_ACT
811 #define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
812 #else
813 #define net_xmit_drop_count(e)  (1)
814 #endif
815 
816 static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
817                                            const struct Qdisc *sch)
818 {
819 #ifdef CONFIG_NET_SCHED
820         struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
821 
822         if (stab)
823                 __qdisc_calculate_pkt_len(skb, stab);
824 #endif
825 }
826 
827 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
828                                 struct sk_buff **to_free)
829 {
830         qdisc_calculate_pkt_len(skb, sch);
831         return sch->enqueue(skb, sch, to_free);
832 }
833 
834 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
835                                   __u64 bytes, __u32 packets)
836 {
837         bstats->bytes += bytes;
838         bstats->packets += packets;
839 }
840 
841 static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
842                                  const struct sk_buff *skb)
843 {
844         _bstats_update(bstats,
845                        qdisc_pkt_len(skb),
846                        skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
847 }
848 
849 static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
850                                       __u64 bytes, __u32 packets)
851 {
852         u64_stats_update_begin(&bstats->syncp);
853         _bstats_update(&bstats->bstats, bytes, packets);
854         u64_stats_update_end(&bstats->syncp);
855 }
856 
857 static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
858                                      const struct sk_buff *skb)
859 {
860         u64_stats_update_begin(&bstats->syncp);
861         bstats_update(&bstats->bstats, skb);
862         u64_stats_update_end(&bstats->syncp);
863 }
864 
865 static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
866                                            const struct sk_buff *skb)
867 {
868         bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
869 }
870 
871 static inline void qdisc_bstats_update(struct Qdisc *sch,
872                                        const struct sk_buff *skb)
873 {
874         bstats_update(&sch->bstats, skb);
875 }
876 
877 static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
878                                             const struct sk_buff *skb)
879 {
880         sch->qstats.backlog -= qdisc_pkt_len(skb);
881 }
882 
883 static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
884                                                 const struct sk_buff *skb)
885 {
886         this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
887 }
888 
889 static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
890                                             const struct sk_buff *skb)
891 {
892         sch->qstats.backlog += qdisc_pkt_len(skb);
893 }
894 
895 static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
896                                                 const struct sk_buff *skb)
897 {
898         this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
899 }
900 
901 static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
902 {
903         this_cpu_inc(sch->cpu_qstats->qlen);
904 }
905 
906 static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
907 {
908         this_cpu_dec(sch->cpu_qstats->qlen);
909 }
910 
911 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
912 {
913         this_cpu_inc(sch->cpu_qstats->requeues);
914 }
915 
916 static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
917 {
918         sch->qstats.drops += count;
919 }
920 
921 static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
922 {
923         qstats->drops++;
924 }
925 
926 static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
927 {
928         qstats->overlimits++;
929 }
930 
931 static inline void qdisc_qstats_drop(struct Qdisc *sch)
932 {
933         qstats_drop_inc(&sch->qstats);
934 }
935 
936 static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
937 {
938         this_cpu_inc(sch->cpu_qstats->drops);
939 }
940 
941 static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
942 {
943         sch->qstats.overlimits++;
944 }
945 
946 static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
947 {
948         __u32 qlen = qdisc_qlen_sum(sch);
949 
950         return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
951 }
952 
953 static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
954                                              __u32 *backlog)
955 {
956         struct gnet_stats_queue qstats = { 0 };
957         __u32 len = qdisc_qlen_sum(sch);
958 
959         __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
960         *qlen = qstats.qlen;
961         *backlog = qstats.backlog;
962 }
963 
964 static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
965 {
966         __u32 qlen, backlog;
967 
968         qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
969         qdisc_tree_reduce_backlog(sch, qlen, backlog);
970 }
971 
972 static inline void qdisc_purge_queue(struct Qdisc *sch)
973 {
974         __u32 qlen, backlog;
975 
976         qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
977         qdisc_reset(sch);
978         qdisc_tree_reduce_backlog(sch, qlen, backlog);
979 }
980 
981 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
982 {
983         qh->head = NULL;
984         qh->tail = NULL;
985         qh->qlen = 0;
986 }
987 
988 static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
989                                         struct qdisc_skb_head *qh)
990 {
991         struct sk_buff *last = qh->tail;
992 
993         if (last) {
994                 skb->next = NULL;
995                 last->next = skb;
996                 qh->tail = skb;
997         } else {
998                 qh->tail = skb;
999                 qh->head = skb;
1000         }
1001         qh->qlen++;
1002 }
1003 
1004 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
1005 {
1006         __qdisc_enqueue_tail(skb, &sch->q);
1007         qdisc_qstats_backlog_inc(sch, skb);
1008         return NET_XMIT_SUCCESS;
1009 }
1010 
1011 static inline void __qdisc_enqueue_head(struct sk_buff *skb,
1012                                         struct qdisc_skb_head *qh)
1013 {
1014         skb->next = qh->head;
1015 
1016         if (!qh->head)
1017                 qh->tail = skb;
1018         qh->head = skb;
1019         qh->qlen++;
1020 }
1021 
1022 static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
1023 {
1024         struct sk_buff *skb = qh->head;
1025 
1026         if (likely(skb != NULL)) {
1027                 qh->head = skb->next;
1028                 qh->qlen--;
1029                 if (qh->head == NULL)
1030                         qh->tail = NULL;
1031                 skb->next = NULL;
1032         }
1033 
1034         return skb;
1035 }
1036 
1037 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
1038 {
1039         struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
1040 
1041         if (likely(skb != NULL)) {
1042                 qdisc_qstats_backlog_dec(sch, skb);
1043                 qdisc_bstats_update(sch, skb);
1044         }
1045 
1046         return skb;
1047 }
1048 
1049 /* Instead of calling kfree_skb() while the root qdisc lock is held,
1050  * queue the skb for future freeing at the end of __dev_xmit_skb().
1051  */
1052 static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
1053 {
1054         skb->next = *to_free;
1055         *to_free = skb;
1056 }
1057 
1058 static inline void __qdisc_drop_all(struct sk_buff *skb,
1059                                     struct sk_buff **to_free)
1060 {
1061         if (skb->prev)
1062                 skb->prev->next = *to_free;
1063         else
1064                 skb->next = *to_free;
1065         *to_free = skb;
1066 }
1067 
1068 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
1069                                                    struct qdisc_skb_head *qh,
1070                                                    struct sk_buff **to_free)
1071 {
1072         struct sk_buff *skb = __qdisc_dequeue_head(qh);
1073 
1074         if (likely(skb != NULL)) {
1075                 unsigned int len = qdisc_pkt_len(skb);
1076 
1077                 qdisc_qstats_backlog_dec(sch, skb);
1078                 __qdisc_drop(skb, to_free);
1079                 return len;
1080         }
1081 
1082         return 0;
1083 }
1084 
1085 static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
1086                                                  struct sk_buff **to_free)
1087 {
1088         return __qdisc_queue_drop_head(sch, &sch->q, to_free);
1089 }
1090 
1091 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
1092 {
1093         const struct qdisc_skb_head *qh = &sch->q;
1094 
1095         return qh->head;
1096 }
1097 
1098 /* generic pseudo peek method for non-work-conserving qdisc */
1099 static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
1100 {
1101         struct sk_buff *skb = skb_peek(&sch->gso_skb);
1102 
1103         /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
1104         if (!skb) {
1105                 skb = sch->dequeue(sch);
1106 
1107                 if (skb) {
1108                         __skb_queue_head(&sch->gso_skb, skb);
1109                         /* it's still part of the queue */
1110                         qdisc_qstats_backlog_inc(sch, skb);
1111                         sch->q.qlen++;
1112                 }
1113         }
1114 
1115         return skb;
1116 }
1117 
1118 static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
1119                                                  struct sk_buff *skb)
1120 {
1121         if (qdisc_is_percpu_stats(sch)) {
1122                 qdisc_qstats_cpu_backlog_dec(sch, skb);
1123                 qdisc_bstats_cpu_update(sch, skb);
1124                 qdisc_qstats_cpu_qlen_dec(sch);
1125         } else {
1126                 qdisc_qstats_backlog_dec(sch, skb);
1127                 qdisc_bstats_update(sch, skb);
1128                 sch->q.qlen--;
1129         }
1130 }
1131 
1132 static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
1133                                                  unsigned int pkt_len)
1134 {
1135         if (qdisc_is_percpu_stats(sch)) {
1136                 qdisc_qstats_cpu_qlen_inc(sch);
1137                 this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
1138         } else {
1139                 sch->qstats.backlog += pkt_len;
1140                 sch->q.qlen++;
1141         }
1142 }
1143 
1144 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
1145 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
1146 {
1147         struct sk_buff *skb = skb_peek(&sch->gso_skb);
1148 
1149         if (skb) {
1150                 skb = __skb_dequeue(&sch->gso_skb);
1151                 if (qdisc_is_percpu_stats(sch)) {
1152                         qdisc_qstats_cpu_backlog_dec(sch, skb);
1153                         qdisc_qstats_cpu_qlen_dec(sch);
1154                 } else {
1155                         qdisc_qstats_backlog_dec(sch, skb);
1156                         sch->q.qlen--;
1157                 }
1158         } else {
1159                 skb = sch->dequeue(sch);
1160         }
1161 
1162         return skb;
1163 }
1164 
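/* Illustrative sketch (not part of the original header): non-work-conserving
 * qdiscs such as tbf set .peek = qdisc_peek_dequeued in their Qdisc_ops and,
 * once the peeked packet is actually allowed to leave, pull it from the child
 * with qdisc_dequeue_peeked() instead of calling ->dequeue() again
 * (enough_tokens() below is a hypothetical admission check):
 *
 *	skb = child->ops->peek(child);
 *	if (skb && enough_tokens(q, skb))
 *		skb = qdisc_dequeue_peeked(child);
 */
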
1165 static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
1166 {
1167         /*
1168          * We do not know the backlog in bytes of this list; it
1169          * is up to the caller to correct it.
1170          */
1171         ASSERT_RTNL();
1172         if (qh->qlen) {
1173                 rtnl_kfree_skbs(qh->head, qh->tail);
1174 
1175                 qh->head = NULL;
1176                 qh->tail = NULL;
1177                 qh->qlen = 0;
1178         }
1179 }
1180 
1181 static inline void qdisc_reset_queue(struct Qdisc *sch)
1182 {
1183         __qdisc_reset_queue(&sch->q);
1184         sch->qstats.backlog = 0;
1185 }
1186 
1187 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1188                                           struct Qdisc **pold)
1189 {
1190         struct Qdisc *old;
1191 
1192         sch_tree_lock(sch);
1193         old = *pold;
1194         *pold = new;
1195         if (old != NULL)
1196                 qdisc_tree_flush_backlog(old);
1197         sch_tree_unlock(sch);
1198 
1199         return old;
1200 }
1201 
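/* Illustrative sketch (not part of the original header): classful qdiscs
 * commonly implement ->graft() on top of qdisc_replace(), e.g.
 *
 *	*old = qdisc_replace(sch, new, &q->qdisc);
 *	return 0;
 *
 * which swaps the child under the tree lock and removes the old child's
 * queue length and backlog from the ancestors' accounting.
 */
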
1202 static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
1203 {
1204         rtnl_kfree_skbs(skb, skb);
1205         qdisc_qstats_drop(sch);
1206 }
1207 
1208 static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
1209                                  struct sk_buff **to_free)
1210 {
1211         __qdisc_drop(skb, to_free);
1212         qdisc_qstats_cpu_drop(sch);
1213 
1214         return NET_XMIT_DROP;
1215 }
1216 
1217 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
1218                              struct sk_buff **to_free)
1219 {
1220         __qdisc_drop(skb, to_free);
1221         qdisc_qstats_drop(sch);
1222 
1223         return NET_XMIT_DROP;
1224 }
1225 
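/* Illustrative sketch (not part of the original header): a simple ->enqueue()
 * handler drops via the to_free list so the skbs are freed only after the
 * root lock is released, much like pfifo does (the function name below is
 * hypothetical):
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */
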
1226 static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
1227                                  struct sk_buff **to_free)
1228 {
1229         __qdisc_drop_all(skb, to_free);
1230         qdisc_qstats_drop(sch);
1231 
1232         return NET_XMIT_DROP;
1233 }
1234 
1235 /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
1236    long it will take to send a packet given its size.
1237  */
1238 static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
1239 {
1240         int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
1241         if (slot < 0)
1242                 slot = 0;
1243         slot >>= rtab->rate.cell_log;
1244         if (slot > 255)
1245                 return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
1246         return rtab->data[slot];
1247 }
1248 
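/* Worked example (illustrative): with cell_log = 3 and both cell_align and
 * overhead equal to 0, a 1000 byte packet maps to slot 1000 >> 3 = 125, so
 * its transmission time is rtab->data[125], as precomputed by userspace
 * tc(8) when it built the rate table.
 */
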
1249 struct psched_ratecfg {
1250         u64     rate_bytes_ps; /* bytes per second */
1251         u32     mult;
1252         u16     overhead;
1253         u8      linklayer;
1254         u8      shift;
1255 };
1256 
1257 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
1258                                 unsigned int len)
1259 {
1260         len += r->overhead;
1261 
1262         if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
1263                 return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
1264 
1265         return ((u64)len * r->mult) >> r->shift;
1266 }
1267 
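/* Worked example (illustrative): for TC_LINKLAYER_ATM, a frame of 100 bytes
 * (after adding r->overhead) needs DIV_ROUND_UP(100, 48) = 3 ATM cells of
 * 53 bytes, i.e. 159 bytes on the wire, which is then scaled to nanoseconds
 * via mult and shift.
 */
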
1268 void psched_ratecfg_precompute(struct psched_ratecfg *r,
1269                                const struct tc_ratespec *conf,
1270                                u64 rate64);
1271 
1272 static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
1273                                           const struct psched_ratecfg *r)
1274 {
1275         memset(res, 0, sizeof(*res));
1276 
1277         /* The legacy struct tc_ratespec has a 32bit @rate field;
1278          * a Qdisc using a 64bit rate should add new attributes
1279          * in order to maintain compatibility.
1280          */
1281         res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
1282 
1283         res->overhead = r->overhead;
1284         res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
1285 }
1286 
1287 /* The mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
1288  * The fast path only needs to access the filter list and to update stats.
1289  */
1290 struct mini_Qdisc {
1291         struct tcf_proto *filter_list;
1292         struct gnet_stats_basic_cpu __percpu *cpu_bstats;
1293         struct gnet_stats_queue __percpu *cpu_qstats;
1294         struct rcu_head rcu;
1295 };
1296 
1297 static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
1298                                                 const struct sk_buff *skb)
1299 {
1300         bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
1301 }
1302 
1303 static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
1304 {
1305         this_cpu_inc(miniq->cpu_qstats->drops);
1306 }
1307 
1308 struct mini_Qdisc_pair {
1309         struct mini_Qdisc miniq1;
1310         struct mini_Qdisc miniq2;
1311         struct mini_Qdisc __rcu **p_miniq;
1312 };
1313 
1314 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1315                           struct tcf_proto *tp_head);
1316 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1317                           struct mini_Qdisc __rcu **p_miniq);
1318 
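/* Illustrative sketch (not part of the original header): the ingress/clsact
 * qdiscs wire up a pair in their ->init() and swap the active mini Qdisc
 * whenever the filter chain head changes, roughly:
 *
 *	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 *	...
 *	mini_qdisc_pair_swap(miniqp, tp_head);	(from the chain head change cb)
 */
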
1319 static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
1320 {
1321         struct gnet_stats_queue *stats = res->qstats;
1322         int ret;
1323 
1324         if (res->ingress)
1325                 ret = netif_receive_skb(skb);
1326         else
1327                 ret = dev_queue_xmit(skb);
1328         if (ret && stats)
1329                 qstats_overlimit_inc(res->qstats);
1330 }
1331 
1332 #endif
1333 
