/*
 * net/sched/sch_cbq.c  Class-Based Queueing discipline.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*      Class-Based Queueing (CBQ) algorithm.
        =======================================

        Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
                 Management Models for Packet Networks",
                 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

                 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

                 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
                 Parameters", 1996

                 [4] Sally Floyd and Michael Speer, "Experimental Results
                 for Class-Based Queueing", 1998, not published.

        -----------------------------------------------------------------------

        The algorithm skeleton was taken from the NS simulator cbq.cc.
        Anyone checking this code against the LBL version should keep in
        mind that ONLY the skeleton was borrowed; the implementation is
        different. In particular:

        --- The WRR algorithm is different. Our version looks more
        reasonable (I hope) and works when quanta are allowed to be
        less than the MTU, which is always the case when real-time
        classes have small rates. Note that the statement in [3] is
        incomplete: delay may actually be estimated even if the class
        per-round allotment is less than the MTU. Namely, if the
        per-round allotment is W*r_i, and r_1+...+r_k = r < 1, then

        delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

        In the worst case we have an IntServ estimate with D = W*r+k*MTU
        and C = MTU*r. The proof (if correct at all) is trivial.


        --- It seems that cbq-2.0 is not very accurate. At least, I cannot
        interpret some places, which look like wrong translations
        from NS. Anyone is advised to find these differences
        and explain to me why I am wrong 8).

        --- Linux has no EOI event, so we cannot estimate the true class
        idle time. The workaround is to consider the next dequeue event
        as a sign that the previous packet has finished. This is wrong
        because of internal device queueing, but on a permanently loaded
        link it is true. Moreover, combined with the clock integrator,
        this scheme looks very close to an ideal solution.  */

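/* To make the delay bound above concrete, a rough worked example
 * (the numbers are illustrative only; they are not taken from the
 * references): take MTU = 1500 bytes, two classes (k = 2) with
 * r_1 = r_2 = 0.25 (so r = 0.5), and a round size W = 1000 bytes,
 * giving a per-round allotment W*r_i = 250 bytes, well below the MTU.
 * Then
 *
 *      delay_1 <= (ceil(1500/250)*500 + 500 + 2*1500)/B = 6500/B
 *
 * so on a 10 Mbit/s link (B = 1.25e6 bytes/s) the bound is roughly
 * 5.2 ms, even though the allotment is smaller than one packet.
 */
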
struct cbq_sched_data;


struct cbq_class {
        struct Qdisc_class_common common;
        struct cbq_class        *next_alive;    /* next class with backlog in this priority band */

/* Parameters */
        unsigned char           priority;       /* class priority */
        unsigned char           priority2;      /* priority to be used after overlimit */
        unsigned char           ewma_log;       /* time constant for idle time calculation */

        u32                     defmap;

        /* Link-sharing scheduler parameters */
        long                    maxidle;        /* Class parameters: see below. */
        long                    offtime;
        long                    minidle;
        u32                     avpkt;
        struct qdisc_rate_table *R_tab;

        /* General scheduler (WRR) parameters */
        long                    allot;
        long                    quantum;        /* Allotment per WRR round */
        long                    weight;         /* Relative allotment: see below */

        struct Qdisc            *qdisc;         /* Ptr to CBQ discipline */
        struct cbq_class        *split;         /* Ptr to split node */
        struct cbq_class        *share;         /* Ptr to LS parent in the class tree */
        struct cbq_class        *tparent;       /* Ptr to tree parent in the class tree */
        struct cbq_class        *borrow;        /* NULL if class is bandwidth limited;
                                                   parent otherwise */
        struct cbq_class        *sibling;       /* Sibling chain */
        struct cbq_class        *children;      /* Pointer to children chain */

        struct Qdisc            *q;             /* Elementary queueing discipline */


/* Variables */
        unsigned char           cpriority;      /* Effective priority */
        unsigned char           delayed;
        unsigned char           level;          /* level of the class in hierarchy:
                                                   0 for leaf classes, and maximal
                                                   level of children + 1 for nodes.
                                                 */

        psched_time_t           last;           /* Last end of service */
        psched_time_t           undertime;
        long                    avgidle;
        long                    deficit;        /* Saved deficit for WRR */
        psched_time_t           penalized;
        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct tc_cbq_xstats    xstats;

        struct tcf_proto __rcu  *filter_list;
        struct tcf_block        *block;

        int                     filters;

        struct cbq_class        *defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
        struct Qdisc_class_hash clhash;                 /* Hash table of all classes */
        int                     nclasses[TC_CBQ_MAXPRIO + 1];
        unsigned int            quanta[TC_CBQ_MAXPRIO + 1];

        struct cbq_class        link;

        unsigned int            activemask;
        struct cbq_class        *active[TC_CBQ_MAXPRIO + 1];    /* List of all classes
                                                                   with backlog */

#ifdef CONFIG_NET_CLS_ACT
        struct cbq_class        *rx_class;
#endif
        struct cbq_class        *tx_class;
        struct cbq_class        *tx_borrowed;
        int                     tx_len;
        psched_time_t           now;            /* Cached timestamp */
        unsigned int            pmask;

        struct hrtimer          delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
                                                   started when CBQ has
                                                   backlog, but cannot
                                                   transmit just now */
        psched_tdiff_t          wd_expires;
        int                     toplevel;
        u32                     hgenerator;
};


#define L2T(cl, len)    qdisc_l2t((cl)->R_tab, len)
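
/* L2T ("length to time") maps a packet length to its expected
 * transmission time at this class's configured rate, in psched ticks,
 * via the precomputed rate table. For example, at a configured rate of
 * 125000 bytes/s, a 1250-byte packet corresponds to roughly 10 ms
 * worth of ticks. (The example rate is illustrative, not from this
 * file.)
 */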

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
        struct cbq_class *cl;

        for (cl = this->tparent; cl; cl = cl->tparent) {
                struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

                if (new != NULL && new != this)
                        return new;
        }
        return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link-sharing rules (e.g. route-based) at the
 * root of CBQ, so that it resolves to split nodes. Then packets are
 * classified by logical priority, or a more specific classifier may
 * be attached to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *head = &q->link;
        struct cbq_class **defmap;
        struct cbq_class *cl = NULL;
        u32 prio = skb->priority;
        struct tcf_proto *fl;
        struct tcf_result res;

        /*
         *  Step 1. If skb->priority points to one of our classes, use it.
         */
        if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
            (cl = cbq_class_lookup(q, prio)) != NULL)
                return cl;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        for (;;) {
                int result = 0;
                defmap = head->defaults;

                fl = rcu_dereference_bh(head->filter_list);
                /*
                 * Step 2+n. Apply classifier.
                 */
                result = tcf_classify(skb, fl, &res, true);
                if (!fl || result < 0)
                        goto fallback;

                cl = (void *)res.class;
                if (!cl) {
                        if (TC_H_MAJ(res.classid))
                                cl = cbq_class_lookup(q, res.classid);
                        else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                                cl = defmap[TC_PRIO_BESTEFFORT];

                        if (cl == NULL)
                                goto fallback;
                }
                if (cl->level >= head->level)
                        goto fallback;
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                case TC_ACT_RECLASSIFY:
                        return cbq_reclassify(skb, cl);
                }
#endif
                if (cl->level == 0)
                        return cl;

                /*
                 * Step 3+n. If the classifier selected a link-sharing
                 *         class, apply the agency-specific classifier.
                 *         Repeat this procedure until we hit a leaf node.
                 */
                head = cl;
        }

fallback:
        cl = head;

        /*
         * Step 4. No success...
         */
        if (TC_H_MAJ(prio) == 0 &&
            !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
            !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
                return head;

        return cl;
}
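
/* An illustration of the defmap path above: if a filter on a split
 * node returns a classid whose major number is zero, the minor number
 * is treated as a logical priority, e.g. res.classid == 3 selects
 * defmap[3]. If that slot is empty, the packet falls back to
 * defmap[TC_PRIO_BESTEFFORT], and only then to the split node itself.
 */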

/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        int prio = cl->cpriority;
        struct cbq_class *cl_tail;

        cl_tail = q->active[prio];
        q->active[prio] = cl;

        if (cl_tail != NULL) {
                cl->next_alive = cl_tail->next_alive;
                cl_tail->next_alive = cl;
        } else {
                cl->next_alive = cl;
                q->activemask |= (1<<prio);
        }
}
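
/* The active list is a circular, singly linked ring threaded through
 * next_alive, with q->active[prio] pointing at the tail (so
 * tail->next_alive is the head). E.g. with ring A -> B -> A and tail
 * B, activating C yields B -> C -> A -> B with tail C: the new class
 * is spliced in between the old tail and the head. Activating into an
 * empty band makes the class point at itself and sets the band's bit
 * in activemask. (A, B, C are hypothetical classes for illustration.)
 */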

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);
        int prio = this->cpriority;
        struct cbq_class *cl;
        struct cbq_class *cl_prev = q->active[prio];

        do {
                cl = cl_prev->next_alive;
                if (cl == this) {
                        cl_prev->next_alive = cl->next_alive;
                        cl->next_alive = NULL;

                        if (cl == q->active[prio]) {
                                q->active[prio] = cl_prev;
                                if (cl == q->active[prio]) {
                                        q->active[prio] = NULL;
                                        q->activemask &= ~(1<<prio);
                                        return;
                                }
                        }
                        return;
                }
        } while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
        int toplevel = q->toplevel;

        if (toplevel > cl->level) {
                psched_time_t now = psched_get_time();

                do {
                        if (cl->undertime < now) {
                                q->toplevel = cl->level;
                                return;
                        }
                } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
        }
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
            struct sk_buff **to_free)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        int uninitialized_var(ret);
        struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
        q->rx_class = cl;
#endif
        if (cl == NULL) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }

        ret = qdisc_enqueue(skb, cl->q, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                cbq_mark_toplevel(q, cl);
                if (!cl->next_alive)
                        cbq_activate_class(cl);
                return ret;
        }

        if (net_xmit_drop_count(ret)) {
                qdisc_qstats_drop(sch);
                cbq_mark_toplevel(q, cl);
                cl->qstats.drops++;
        }
        return ret;
}

/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        psched_tdiff_t delay = cl->undertime - q->now;

        if (!cl->delayed) {
                delay += cl->offtime;

                /*
                 * The class goes to sleep, so that it will have no
                 * chance to work off avgidle. Let's forgive it 8)
                 *
                 * BTW cbq-2.0 has a bug in this place; apparently they
                 * forgot to shift it by cl->ewma_log.
                 */
                if (cl->avgidle < 0)
                        delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
                if (cl->avgidle < cl->minidle)
                        cl->avgidle = cl->minidle;
                if (delay <= 0)
                        delay = 1;
                cl->undertime = q->now + delay;

                cl->xstats.overactions++;
                cl->delayed = 1;
        }
        if (q->wd_expires == 0 || q->wd_expires > delay)
                q->wd_expires = delay;

        /* Dirty work! We must schedule wakeups based on the
         * real available rate, rather than the leaf rate,
         * which may be tiny (even zero).
         */
        if (q->toplevel == TC_CBQ_MAXLEVEL) {
                struct cbq_class *b;
                psched_tdiff_t base_delay = q->wd_expires;

                for (b = cl->borrow; b; b = b->borrow) {
                        delay = b->undertime - q->now;
                        if (delay < base_delay) {
                                if (delay <= 0)
                                        delay = 1;
                                base_delay = delay;
                        }
                }

                q->wd_expires = base_delay;
        }
}
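
/* A rough numeric sketch of the sleep computation above (figures made
 * up for illustration): suppose undertime - now = 100 ticks,
 * offtime = 300 ticks, avgidle = -2048 and ewma_log = 5. Then
 * delay = 400 - (2048 - (2048 >> 5)) = 400 - 1984 = -1584, which is
 * negative, so it is clamped to 1 tick; the class is marked delayed
 * and its undertime is pushed just past q->now.
 */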

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
                                       psched_time_t now)
{
        struct cbq_class *cl;
        struct cbq_class *cl_prev = q->active[prio];
        psched_time_t sched = now;

        if (cl_prev == NULL)
                return 0;

        do {
                cl = cl_prev->next_alive;
                if (now - cl->penalized > 0) {
                        cl_prev->next_alive = cl->next_alive;
                        cl->next_alive = NULL;
                        cl->cpriority = cl->priority;
                        cl->delayed = 0;
                        cbq_activate_class(cl);

                        if (cl == q->active[prio]) {
                                q->active[prio] = cl_prev;
                                if (cl == q->active[prio]) {
                                        q->active[prio] = NULL;
                                        return 0;
                                }
                        }

                        cl = cl_prev->next_alive;
                } else if (sched - cl->penalized > 0)
                        sched = cl->penalized;
        } while ((cl_prev = cl) != q->active[prio]);

        return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
        struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
                                                delay_timer);
        struct Qdisc *sch = q->watchdog.qdisc;
        psched_time_t now;
        psched_tdiff_t delay = 0;
        unsigned int pmask;

        now = psched_get_time();

        pmask = q->pmask;
        q->pmask = 0;

        while (pmask) {
                int prio = ffz(~pmask);
                psched_tdiff_t tmp;

                pmask &= ~(1<<prio);

                tmp = cbq_undelay_prio(q, prio, now);
                if (tmp > 0) {
                        q->pmask |= 1<<prio;
                        if (tmp < delay || delay == 0)
                                delay = tmp;
                }
        }

        if (delay) {
                ktime_t time;

                time = 0;
                time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
                hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
        }

        __netif_schedule(qdisc_root(sch));
        return HRTIMER_NORESTART;
}

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. This is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                    struct cbq_class *borrowed)
{
        if (cl && q->toplevel >= borrowed->level) {
                if (cl->q->q.qlen > 1) {
                        do {
                                if (borrowed->undertime == PSCHED_PASTPERFECT) {
                                        q->toplevel = borrowed->level;
                                        return;
                                }
                        } while ((borrowed = borrowed->borrow) != NULL);
                }
#if 0
        /* It is not necessary now. Uncommenting it
           will save CPU cycles, but decrease fairness.
         */
                q->toplevel = TC_CBQ_MAXLEVEL;
#endif
        }
}

static void
cbq_update(struct cbq_sched_data *q)
{
        struct cbq_class *this = q->tx_class;
        struct cbq_class *cl = this;
        int len = q->tx_len;
        psched_time_t now;

        q->tx_class = NULL;
        /* Time integrator. We calculate EOS time
         * by adding the expected packet transmission time.
         */
        now = q->now + L2T(&q->link, len);

        for ( ; cl; cl = cl->share) {
                long avgidle = cl->avgidle;
                long idle;

                cl->bstats.packets++;
                cl->bstats.bytes += len;

                /*
                 * (now - last) is the total time between packet right edges.
                 * (last_pktlen/rate) is "virtual" busy time, so that
                 *
                 *      idle = (now - last) - last_pktlen/rate
                 */

                idle = now - cl->last;
                if ((unsigned long)idle > 128*1024*1024) {
                        avgidle = cl->maxidle;
                } else {
                        idle -= L2T(cl, len);

                /* true_avgidle := (1-W)*true_avgidle + W*idle,
                 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
                 * cl->avgidle == true_avgidle/W,
                 * hence:
                 */
                        avgidle += idle - (avgidle>>cl->ewma_log);
                }

                if (avgidle <= 0) {
                        /* Overlimit or at-limit */

                        if (avgidle < cl->minidle)
                                avgidle = cl->minidle;

                        cl->avgidle = avgidle;

                        /* Calculate the expected time when this class
                         * will be allowed to send.
                         * It will occur when:
                         * (1-W)*true_avgidle + W*delay = 0, i.e.
                         * idle = (1/W - 1)*(-true_avgidle)
                         * or
                         * idle = (1 - W)*(-cl->avgidle);
                         */
                        idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

                        /*
                         * That is not all.
                         * To maintain the rate allocated to the class,
                         * we add to undertime the virtual clock time
                         * necessary to complete the transmitted packet.
                         * (len/phys_bandwidth has already passed
                         * by the moment of cbq_update)
                         */

                        idle -= L2T(&q->link, len);
                        idle += L2T(cl, len);

                        cl->undertime = now + idle;
                } else {
                        /* Underlimit */

                        cl->undertime = PSCHED_PASTPERFECT;
                        if (avgidle > cl->maxidle)
                                cl->avgidle = cl->maxidle;
                        else
                                cl->avgidle = avgidle;
                }
                if ((s64)(now - cl->last) > 0)
                        cl->last = now;
        }

        cbq_update_toplevel(q, this, q->tx_borrowed);
}
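
/* A numeric illustration of the scaled EWMA above: with ewma_log = 5
 * (W = 1/32), the update is
 *
 *      avgidle += idle - (avgidle >> 5);
 *
 * whose fixed point is avgidle = 32*idle, i.e. the stored value is
 * true_avgidle/W. So if a class settles at idle = 10 ticks per packet,
 * cl->avgidle converges to about 320. (Example numbers are
 * illustrative only.)
 */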

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *this_cl = cl;

        if (cl->tparent == NULL)
                return cl;

        if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
                cl->delayed = 0;
                return cl;
        }

        do {
                /* This is a very suspicious place. Currently, the
                 * overlimit action is generated for non-bounded classes
                 * only if the link is completely congested.
                 * Though this agrees with the ancestor-only paradigm,
                 * it looks very stupid. In particular, it means that
                 * this chunk of code will either never be called or
                 * result in strong amplification of burstiness.
                 * Dangerous, silly, and yet no other solution exists.
                 */
                cl = cl->borrow;
                if (!cl) {
                        this_cl->qstats.overlimits++;
                        cbq_overlimit(this_cl);
                        return NULL;
                }
                if (cl->level > q->toplevel)
                        return NULL;
        } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

        cl->delayed = 0;
        return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl_tail, *cl_prev, *cl;
        struct sk_buff *skb;
        int deficit;

        cl_tail = cl_prev = q->active[prio];
        cl = cl_prev->next_alive;

        do {
                deficit = 0;

                /* Start round */
                do {
                        struct cbq_class *borrow = cl;

                        if (cl->q->q.qlen &&
                            (borrow = cbq_under_limit(cl)) == NULL)
                                goto skip_class;

                        if (cl->deficit <= 0) {
                                /* Class exhausted its allotment for
                                 * this round. Switch to the next one.
                                 */
                                deficit = 1;
                                cl->deficit += cl->quantum;
                                goto next_class;
                        }

                        skb = cl->q->dequeue(cl->q);

                        /* Class did not give us any skb :-(
                         * This can occur even if cl->q->q.qlen != 0,
                         * e.g. if cl->q == "tbf"
                         */
                        if (skb == NULL)
                                goto skip_class;

                        cl->deficit -= qdisc_pkt_len(skb);
                        q->tx_class = cl;
                        q->tx_borrowed = borrow;
                        if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
                                borrow->xstats.borrows++;
                                cl->xstats.borrows++;
#else
                                borrow->xstats.borrows += qdisc_pkt_len(skb);
                                cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
                        }
                        q->tx_len = qdisc_pkt_len(skb);

                        if (cl->deficit <= 0) {
                                q->active[prio] = cl;
                                cl = cl->next_alive;
                                cl->deficit += cl->quantum;
                        }
                        return skb;

skip_class:
                        if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
                                /* Class is empty or penalized.
                                 * Unlink it from the active chain.
                                 */
                                cl_prev->next_alive = cl->next_alive;
                                cl->next_alive = NULL;

                                /* Did cl_tail point to it? */
                                if (cl == cl_tail) {
                                        /* Repair it! */
                                        cl_tail = cl_prev;

                                        /* Was it the last class in this band? */
                                        if (cl == cl_tail) {
                                                /* Kill the band! */
                                                q->active[prio] = NULL;
                                                q->activemask &= ~(1<<prio);
                                                if (cl->q->q.qlen)
                                                        cbq_activate_class(cl);
                                                return NULL;
                                        }

                                        q->active[prio] = cl_tail;
                                }
                                if (cl->q->q.qlen)
                                        cbq_activate_class(cl);

                                cl = cl_prev;
                        }

next_class:
                        cl_prev = cl;
                        cl = cl->next_alive;
                } while (cl_prev != cl_tail);
        } while (deficit);

        q->active[prio] = cl_prev;

        return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int activemask;

        activemask = q->activemask & 0xFF;
        while (activemask) {
                int prio = ffz(~activemask);
                activemask &= ~(1<<prio);
                skb = cbq_dequeue_prio(sch, prio);
                if (skb)
                        return skb;
        }
        return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct cbq_sched_data *q = qdisc_priv(sch);
        psched_time_t now;

        now = psched_get_time();

        if (q->tx_class)
                cbq_update(q);

        q->now = now;

        for (;;) {
                q->wd_expires = 0;

                skb = cbq_dequeue_1(sch);
                if (skb) {
                        qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }

                /* All the classes are overlimit.
                 *
                 * This is possible if:
                 *
                 * 1. The scheduler is empty.
                 * 2. The toplevel cutoff inhibited borrowing.
                 * 3. The root class is overlimit.
                 *
                 * Reset the 2nd and 3rd conditions and retry.
                 *
                 * Note that NS and cbq-2.0 are buggy: peeking
                 * an arbitrary class is appropriate for ancestor-only
                 * sharing, but not for the toplevel algorithm.
                 *
                 * Our version is better but slower, because it requires
                 * two passes; this is unavoidable with toplevel sharing.
                 */

                if (q->toplevel == TC_CBQ_MAXLEVEL &&
                    q->link.undertime == PSCHED_PASTPERFECT)
                        break;

                q->toplevel = TC_CBQ_MAXLEVEL;
                q->link.undertime = PSCHED_PASTPERFECT;
        }

        /* No packets in the scheduler, or nobody wants to give them to us :-(
         * Sigh... start the watchdog timer in the latter case.
         */

        if (sch->q.qlen) {
                qdisc_qstats_overlimit(sch);
                if (q->wd_expires)
                        qdisc_watchdog_schedule(&q->watchdog,
                                                now + q->wd_expires);
        }
        return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
        if (this == NULL)
                return;

        do {
                int level = 0;
                struct cbq_class *cl;

                cl = this->children;
                if (cl) {
                        do {
                                if (cl->level > level)
                                        level = cl->level;
                        } while ((cl = cl->sibling) != this->children);
                }
                this->level = level + 1;
        } while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
        struct cbq_class *cl;
        unsigned int h;

        if (q->quanta[prio] == 0)
                return;

        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        /* BUGGGG... Beware! This expression suffers from
                         * arithmetic overflows!
                         */
                        if (cl->priority == prio) {
                                cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
                                        q->quanta[prio];
                        }
                        if (cl->quantum <= 0 ||
                            cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
                                pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
                                        cl->common.classid, cl->quantum);
                                cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
                        }
                }
        }
}
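
/* A quick worked example of the normalization (numbers illustrative
 * only): suppose priority p has three classes with weights 1, 2 and 3
 * and allot 1514, so q->quanta[p] = 6 and q->nclasses[p] = 3. The
 * weight-2 class then gets
 *
 *      quantum = (2 * 1514 * 3) / 6 = 1514
 *
 * and the others get 757 and 2271: quanta stay proportional to the
 * weights while their mean stays at the mean allot.
 */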

static void cbq_sync_defmap(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *split = cl->split;
        unsigned int h;
        int i;

        if (split == NULL)
                return;

        for (i = 0; i <= TC_PRIO_MAX; i++) {
                if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
                        split->defaults[i] = NULL;
        }

        for (i = 0; i <= TC_PRIO_MAX; i++) {
                int level = split->level;

                if (split->defaults[i])
                        continue;

                for (h = 0; h < q->clhash.hashsize; h++) {
                        struct cbq_class *c;

                        hlist_for_each_entry(c, &q->clhash.hash[h],
                                             common.hnode) {
                                if (c->split == split && c->level < level &&
                                    c->defmap & (1<<i)) {
                                        split->defaults[i] = c;
                                        level = c->level;
                                }
                        }
                }
        }
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
        struct cbq_class *split = NULL;

        if (splitid == 0) {
                split = cl->split;
                if (!split)
                        return;
                splitid = split->common.classid;
        }

        if (split == NULL || split->common.classid != splitid) {
                for (split = cl->tparent; split; split = split->tparent)
                        if (split->common.classid == splitid)
                                break;
        }

        if (split == NULL)
                return;

        if (cl->split != split) {
                cl->defmap = 0;
                cbq_sync_defmap(cl);
                cl->split = split;
                cl->defmap = def & mask;
        } else
                cl->defmap = (cl->defmap & ~mask) | (def & mask);

        cbq_sync_defmap(cl);
}
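
/* defmap is a bitmask over the TC_PRIO_* values: bit i set means this
 * class volunteers as the default for logical priority i on its split
 * node. E.g. cbq_change_defmap(cl, splitid, 0x0c, 0x0c) claims
 * priorities 2 and 3 for cl while leaving the other bits untouched;
 * cbq_sync_defmap then rebuilds split->defaults[] from the claims,
 * preferring the lowest-level matching class. (The mask values here
 * are examples, not from this file.)
 */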

static void cbq_unlink_class(struct cbq_class *this)
{
        struct cbq_class *cl, **clp;
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);

        qdisc_class_hash_remove(&q->clhash, &this->common);

        if (this->tparent) {
                clp = &this->sibling;
                cl = *clp;
                do {
                        if (cl == this) {
                                *clp = cl->sibling;
                                break;
                        }
                        clp = &cl->sibling;
                } while ((cl = *clp) != this->sibling);

                if (this->tparent->children == this) {
                        this->tparent->children = this->sibling;
                        if (this->sibling == this)
                                this->tparent->children = NULL;
                }
        } else {
                WARN_ON(this->sibling != this);
        }
}

static void cbq_link_class(struct cbq_class *this)
{
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);
        struct cbq_class *parent = this->tparent;

        this->sibling = this;
        qdisc_class_hash_insert(&q->clhash, &this->common);

        if (parent == NULL)
                return;

        if (parent->children == NULL) {
                parent->children = this;
        } else {
                this->sibling = parent->children->sibling;
                parent->children->sibling = this;
        }
}

static void
cbq_reset(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        int prio;
        unsigned int h;

        q->activemask = 0;
        q->pmask = 0;
        q->tx_class = NULL;
        q->tx_borrowed = NULL;
        qdisc_watchdog_cancel(&q->watchdog);
        hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();

        for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
                q->active[prio] = NULL;

        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        qdisc_reset(cl->q);

                        cl->next_alive = NULL;
                        cl->undertime = PSCHED_PASTPERFECT;
                        cl->avgidle = cl->maxidle;
                        cl->deficit = cl->quantum;
                        cl->cpriority = cl->priority;
                }
        }
        sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
        if (lss->change & TCF_CBQ_LSS_FLAGS) {
                cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
                cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
        }
        if (lss->change & TCF_CBQ_LSS_EWMA)
                cl->ewma_log = lss->ewma_log;
        if (lss->change & TCF_CBQ_LSS_AVPKT)
                cl->avpkt = lss->avpkt;
        if (lss->change & TCF_CBQ_LSS_MINIDLE)
                cl->minidle = -(long)lss->minidle;
        if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
                cl->maxidle = lss->maxidle;
                cl->avgidle = lss->maxidle;
        }
        if (lss->change & TCF_CBQ_LSS_OFFTIME)
                cl->offtime = lss->offtime;
        return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
        q->nclasses[cl->priority]--;
        q->quanta[cl->priority] -= cl->weight;
        cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
        q->nclasses[cl->priority]++;
        q->quanta[cl->priority] += cl->weight;
        cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

        if (wrr->allot)
                cl->allot = wrr->allot;
        if (wrr->weight)
                cl->weight = wrr->weight;
        if (wrr->priority) {
                cl->priority = wrr->priority - 1;
                cl->cpriority = cl->priority;
                if (cl->priority >= cl->priority2)
                        cl->priority2 = TC_CBQ_MAXPRIO - 1;
        }

        cbq_addprio(q, cl);
        return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
        cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
        return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_LSSOPT]        = { .len = sizeof(struct tc_cbq_lssopt) },
        [TCA_CBQ_WRROPT]        = { .len = sizeof(struct tc_cbq_wrropt) },
        [TCA_CBQ_FOPT]          = { .len = sizeof(struct tc_cbq_fopt) },
        [TCA_CBQ_OVL_STRATEGY]  = { .len = sizeof(struct tc_cbq_ovl) },
        [TCA_CBQ_RATE]          = { .len = sizeof(struct tc_ratespec) },
        [TCA_CBQ_RTAB]          = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CBQ_MAX + 1];
        struct tc_ratespec *r;
        int err;

        qdisc_watchdog_init(&q->watchdog, sch);
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
                return -EINVAL;

        r = nla_data(tb[TCA_CBQ_RATE]);

        if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
                return -EINVAL;

        err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
        if (err)
                goto put_rtab;

        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                goto put_block;

        q->link.sibling = &q->link;
        q->link.common.classid = sch->handle;
        q->link.qdisc = sch;
        q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                      sch->handle);
        if (!q->link.q)
                q->link.q = &noop_qdisc;
        else
                qdisc_hash_add(q->link.q, true);

        q->link.priority = TC_CBQ_MAXPRIO - 1;
        q->link.priority2 = TC_CBQ_MAXPRIO - 1;
        q->link.cpriority = TC_CBQ_MAXPRIO - 1;
        q->link.allot = psched_mtu(qdisc_dev(sch));
        q->link.quantum = q->link.allot;
        q->link.weight = q->link.R_tab->rate.rate;

        q->link.ewma_log = TC_CBQ_DEF_EWMA;
        q->link.avpkt = q->link.allot/2;
        q->link.minidle = -0x7FFFFFFF;

        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();

        cbq_link_class(&q->link);

        if (tb[TCA_CBQ_LSSOPT])
                cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

        cbq_addprio(q, &q->link);
        return 0;

put_block:
        tcf_block_put(q->link.block);

put_rtab:
        qdisc_put_rtab(q->link.R_tab);
        return err;
}
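
/* For reference, a typical userspace configuration that exercises this
 * init path and cbq_change_class below might look like the following
 * (iproute2 tc syntax; the device name and numbers are illustrative):
 *
 *   tc qdisc add dev eth0 root handle 1: cbq \
 *           bandwidth 100Mbit avpkt 1000 cell 8
 *   tc class add dev eth0 parent 1: classid 1:1 cbq \
 *           bandwidth 100Mbit rate 6Mbit allot 1514 prio 5 \
 *           avpkt 1000 maxburst 20 bounded
 *
 * "bounded" clears cl->borrow (TCF_CBQ_LSS_BOUNDED); "isolated" would
 * clear cl->share, as handled in cbq_set_lss above.
 */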

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);

        if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_lssopt opt;

        opt.flags = 0;
        if (cl->borrow == NULL)
                opt.flags |= TCF_CBQ_LSS_BOUNDED;
        if (cl->share == NULL)
                opt.flags |= TCF_CBQ_LSS_ISOLATED;
        opt.ewma_log = cl->ewma_log;
        opt.level = cl->level;
        opt.avpkt = cl->avpkt;
        opt.maxidle = cl->maxidle;
        opt.minidle = (u32)(-cl->minidle);
        opt.offtime = cl->offtime;
        opt.change = ~0;
        if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_wrropt opt;

        memset(&opt, 0, sizeof(opt));
        opt.flags = 0;
        opt.allot = cl->allot;
        opt.priority = cl->priority + 1;
        opt.cpriority = cl->cpriority + 1;
        opt.weight = cl->weight;
        if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_fopt opt;

        if (cl->split || cl->defmap) {
                opt.split = cl->split ? cl->split->common.classid : 0;
                opt.defmap = cl->defmap;
                opt.defchange = ~0;
                if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
                        goto nla_put_failure;
        }
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
        if (cbq_dump_lss(skb, cl) < 0 ||
            cbq_dump_rate(skb, cl) < 0 ||
            cbq_dump_wrr(skb, cl) < 0 ||
            cbq_dump_fopt(skb, cl) < 0)
                return -1;
        return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *nest;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (cbq_dump_attr(skb, &q->link) < 0)
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct cbq_sched_data *q = qdisc_priv(sch);

        q->link.xstats.avgidle = q->link.avgidle;
        return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
               struct sk_buff *skb, struct tcmsg *tcm)
{
        struct cbq_class *cl = (struct cbq_class *)arg;
        struct nlattr *nest;

        if (cl->tparent)
                tcm->tcm_parent = cl->tparent->common.classid;
        else
                tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info = cl->q->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (cbq_dump_attr(skb, cl) < 0)
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct gnet_dump *d)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;

        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;

        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;

        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue,
                                        &pfifo_qdisc_ops, cl->common.classid);
                if (new == NULL)
                        return -ENOBUFS;
        }

        *old = qdisc_replace(sch, new, &cl->q);
        return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        cbq_deactivate_class(cl);
}

static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
{
        struct cbq_sched_data *q = qdisc_priv(sch);

        return (unsigned long)cbq_class_lookup(q, classid);
}

static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(sch);

        WARN_ON(cl->filters);

        tcf_block_put(cl->block);
        qdisc_destroy(cl->q);
        qdisc_put_rtab(cl->R_tab);
        gen_kill_estimator(&cl->rate_est);
        if (cl != &q->link)
                kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct hlist_node *next;
        struct cbq_class *cl;
        unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
        q->rx_class = NULL;
#endif
        /*
         * Filters must be destroyed first because we don't destroy the
         * classes from root to leaves, which means that filters can still
         * be bound to classes which have been destroyed already. --TGR '04
         */
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        tcf_block_put(cl->block);
                        cl->block = NULL;
                }
        }
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
                                          common.hnode)
                        cbq_destroy_class(sch, cl);
        }
        qdisc_class_hash_destroy(&q->clhash);
}
1450 
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
                 unsigned long *arg)
{
        int err;
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)*arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_CBQ_MAX + 1];
        struct cbq_class *parent;
        struct qdisc_rate_table *rtab = NULL;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
                return -EOPNOTSUPP;

        if (cl) {
                /* Check parent */
                if (parentid) {
                        if (cl->tparent &&
                            cl->tparent->common.classid != parentid)
                                return -EINVAL;
                        if (!cl->tparent && parentid != TC_H_ROOT)
                                return -EINVAL;
                }

                if (tb[TCA_CBQ_RATE]) {
                        rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
                                              tb[TCA_CBQ_RTAB]);
                        if (rtab == NULL)
                                return -EINVAL;
                }

                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
                                                    NULL,
                                                    qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err) {
                                qdisc_put_rtab(rtab);
                                return err;
                        }
                }

                /* Change class parameters */
                sch_tree_lock(sch);

                if (cl->next_alive != NULL)
                        cbq_deactivate_class(cl);

                if (rtab) {
                        qdisc_put_rtab(cl->R_tab);
                        cl->R_tab = rtab;
                }

                if (tb[TCA_CBQ_LSSOPT])
                        cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

                if (tb[TCA_CBQ_WRROPT]) {
                        cbq_rmprio(q, cl);
                        cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
                }

                if (tb[TCA_CBQ_FOPT])
                        cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

                if (cl->q->q.qlen)
                        cbq_activate_class(cl);

                sch_tree_unlock(sch);

                return 0;
        }

        if (parentid == TC_H_ROOT)
                return -EINVAL;

        if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
            tb[TCA_CBQ_LSSOPT] == NULL)
                return -EINVAL;

        rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
        if (rtab == NULL)
                return -EINVAL;

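        /* An explicit classid must carry this qdisc's handle as its major
         * number and must not clash with an existing class; with classid
         * zero, a free minor id of the form 0x8000 | n is generated
         * (-ENOSR when all are taken).
         */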
        if (classid) {
                err = -EINVAL;
                if (TC_H_MAJ(classid ^ sch->handle) ||
                    cbq_class_lookup(q, classid))
                        goto failure;
        } else {
                int i;
                classid = TC_H_MAKE(sch->handle, 0x8000);

                for (i = 0; i < 0x8000; i++) {
                        if (++q->hgenerator >= 0x8000)
                                q->hgenerator = 1;
                        if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
                                break;
                }
                err = -ENOSR;
                if (i >= 0x8000)
                        goto failure;
                classid = classid|q->hgenerator;
        }

        parent = &q->link;
        if (parentid) {
                parent = cbq_class_lookup(q, parentid);
                err = -EINVAL;
                if (parent == NULL)
                        goto failure;
        }

        err = -ENOBUFS;
        cl = kzalloc(sizeof(*cl), GFP_KERNEL);
        if (cl == NULL)
                goto failure;

        err = tcf_block_get(&cl->block, &cl->filter_list, sch);
        if (err) {
                kfree(cl);
                return err;
        }

        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
                                        NULL,
                                        qdisc_root_sleeping_running(sch),
                                        tca[TCA_RATE]);
                if (err) {
                        tcf_block_put(cl->block);
                        kfree(cl);
                        goto failure;
                }
        }

        cl->R_tab = rtab;
        rtab = NULL;
        cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
        if (!cl->q)
                cl->q = &noop_qdisc;
        else
                qdisc_hash_add(cl->q, true);

        cl->common.classid = classid;
        cl->tparent = parent;
        cl->qdisc = sch;
        cl->allot = parent->allot;
        cl->quantum = cl->allot;
        cl->weight = cl->R_tab->rate.rate;

        sch_tree_lock(sch);
        cbq_link_class(cl);
        cl->borrow = cl->tparent;
        if (cl->tparent != &q->link)
                cl->share = cl->tparent;
        cbq_adjust_levels(parent);
        cl->minidle = -0x7FFFFFFF;
        cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
        cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
        if (cl->ewma_log == 0)
                cl->ewma_log = q->link.ewma_log;
        if (cl->maxidle == 0)
                cl->maxidle = q->link.maxidle;
        if (cl->avpkt == 0)
                cl->avpkt = q->link.avpkt;
        if (tb[TCA_CBQ_FOPT])
                cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

        *arg = (unsigned long)cl;
        return 0;

failure:
        qdisc_put_rtab(rtab);
        return err;
}

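/* Delete a class.  Refused with -EBUSY while filters are bound to it,
 * while it still has children, or for the built-in root class.
 */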
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
        unsigned int qlen, backlog;

        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;

        sch_tree_lock(sch);

        qlen = cl->q->q.qlen;
        backlog = cl->q->qstats.backlog;
        qdisc_reset(cl->q);
        qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

        if (cl->next_alive)
                cbq_deactivate_class(cl);

        if (q->tx_borrowed == cl)
                q->tx_borrowed = q->tx_class;
        if (q->tx_class == cl) {
                q->tx_class = NULL;
                q->tx_borrowed = NULL;
        }
#ifdef CONFIG_NET_CLS_ACT
        if (q->rx_class == cl)
                q->rx_class = NULL;
#endif

        cbq_unlink_class(cl);
        cbq_adjust_levels(cl->tparent);
        cl->defmap = 0;
        cbq_sync_defmap(cl);

        cbq_rmprio(q, cl);
        sch_tree_unlock(sch);

        cbq_destroy_class(sch, cl);
        return 0;
}

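/* Return the filter block for a class; arg == 0 means the root class. */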
static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;

        if (cl == NULL)
                cl = &q->link;

        return cl->block;
}

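/* Filter binding bumps a per-class use count so cbq_delete() can
 * refuse classes that filters still reference.  A bind is rejected
 * (returns 0) when the target class is not strictly below the
 * filter's parent class in the hierarchy.
 */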
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
                                     u32 classid)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *p = (struct cbq_class *)parent;
        struct cbq_class *cl = cbq_class_lookup(q, classid);

        if (cl) {
                if (p && p->level <= cl->level)
                        return 0;
                cl->filters++;
                return (unsigned long)cl;
        }
        return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        cl->filters--;
}

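/* Walk all classes for dump operations, honouring the walker's
 * skip/count bookkeeping and stopping when the callback asks to.
 */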
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        unsigned int h;

        if (arg->stop)
                return;

        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static const struct Qdisc_class_ops cbq_class_ops = {
        .graft          =       cbq_graft,
        .leaf           =       cbq_leaf,
        .qlen_notify    =       cbq_qlen_notify,
        .find           =       cbq_find,
        .change         =       cbq_change_class,
        .delete         =       cbq_delete,
        .walk           =       cbq_walk,
        .tcf_block      =       cbq_tcf_block,
        .bind_tcf       =       cbq_bind_filter,
        .unbind_tcf     =       cbq_unbind_filter,
        .dump           =       cbq_dump_class,
        .dump_stats     =       cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
        .next           =       NULL,
        .cl_ops         =       &cbq_class_ops,
        .id             =       "cbq",
        .priv_size      =       sizeof(struct cbq_sched_data),
        .enqueue        =       cbq_enqueue,
        .dequeue        =       cbq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       cbq_init,
        .reset          =       cbq_reset,
        .destroy        =       cbq_destroy,
        .change         =       NULL,
        .dump           =       cbq_dump,
        .dump_stats     =       cbq_dump_stats,
        .owner          =       THIS_MODULE,
};

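/* Module (un)registration.  Illustrative only, not part of this file:
 * once loaded, the discipline can be attached as a root qdisc with
 * something like
 *
 *   tc qdisc add dev eth0 root handle 1: cbq bandwidth 10mbit avpkt 1000
 */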
static int __init cbq_module_init(void)
{
        return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
        unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");
