/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	The algorithm skeleton was taken from the NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	they should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement of [3] is
	incomplete: delay may actually be estimated even if class
	per-round allotment is less than MTU. Namely, if per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so that we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong because of
	internal device queueing, but on a permanently loaded link it is true.
	Moreover, combined with the clock integrator, this scheme looks
	very close to an ideal solution.  */
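
/* A worked instance of the bound above, with assumed figures (illustrative
 * only, not taken from the references): let B = 1.25e6 bytes/s (10 Mbit/s),
 * MTU = 1500 bytes, k = 4 classes with r = r_1+...+r_4 = 0.9, per-round
 * allotment W*r_i = 500 bytes for the class of interest and W*r = 9000
 * bytes in total. Then
 *
 *	delay_i <= (ceil(1500/500)*9000 + 9000 + 4*1500)/B
 *	         = 42000 bytes / 1.25e6 bytes/s ~= 33.6 ms,
 *
 * i.e. the delay stays bounded even though the per-round allotment is
 * smaller than the MTU, which is the point made against [3] above.
 */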

struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif
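
/* A small illustration of the defaults[]/defmap machinery used below
 * (hypothetical values, not from the original file): a class that
 * registers defmap 0x0003 on its split node becomes split->defaults[0]
 * and split->defaults[1], i.e. the default destination for logical
 * priorities 0 and 1. A classifier result that carries neither a class
 * pointer nor a major handle is treated as such a logical priority, with
 * defaults[TC_PRIO_BESTEFFORT] as the final fallback.
 */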

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link-sharing rules (e.g. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tcf_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link-sharing class,
		 * apply agency specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}

/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}
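
/* Sketch of the active list (picture only, inferred from the code above):
 * q->active[prio] points at the tail of a circular, singly linked list
 * threaded through ->next_alive; tail->next_alive is the head, where the
 * round-robin scan starts. With class A activated and then class B:
 *
 *	q->active[prio] --> B --next_alive--> A --next_alive--> B ...
 *
 * cbq_activate_class() links the new class in behind the old tail and
 * makes it the new tail, so it is visited last in the current order.
 */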

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}
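
/* The penalty applied by cbq_overlimit() below works out to
 *
 *	delay = (undertime - now) + offtime
 *		- (-avgidle)*(1 - 2^-ewma_log)	(last term only if avgidle < 0)
 *
 * For example (assumed values, in psched ticks): undertime - now = 2000,
 * offtime = 1000, avgidle = -512, ewma_log = 5 gives
 * delay = 2000 + 1000 - (512 - 16) = 2504, so the class sleeps until
 * q->now + 2504 and is marked delayed.
 */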

/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work off its avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug in this place; apparently they
		 * forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = 0;
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. It is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
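
/* cbq_update() below keeps avgidle pre-scaled by 1/W, W = 2^-ewma_log,
 * so the EWMA
 *
 *	true_avgidle := (1-W)*true_avgidle + W*idle
 *
 * becomes the integer update  avgidle += idle - (avgidle >> ewma_log).
 * Worked example with assumed numbers: ewma_log = 5 (W = 1/32),
 * avgidle = 3200 (i.e. true_avgidle = 100) and idle = 100 gives
 * avgidle += 100 - 100 = 0, leaving true_avgidle at 100, exactly as the
 * unscaled formula would.
 */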

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate the expected time when this class
			 * will be allowed to send.
			 * It will occur when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime the virtual clock time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already passed
			 * by the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for non-bounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous and silly, but
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
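
/* The WRR variant used by cbq_dequeue_prio() below, in outline: every
 * active class in the band carries a byte deficit. A class may transmit
 * while its deficit is positive; a class whose deficit is exhausted is
 * skipped for the rest of the round and recharged by cl->quantum, and a
 * new round is started as long as some class was recharged. This is why
 * quanta smaller than the MTU work: a large packet simply consumes
 * several rounds' worth of quantum.
 */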

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0,
			 * e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}
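
/* Note on the band scan above: activemask keeps one bit per priority
 * band with backlog, and ffz(~mask) yields the index of the lowest set
 * bit, so bands are visited from the numerically lowest (i.e.
 * highest-priority) upwards; e.g. a mask of 0x05 visits band 0, then
 * band 2.
 */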

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * This is possible if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy here: picking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower, because it requires
		 * two passes; this is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start the watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}

static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}
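
/* Tree linkage used by cbq_unlink_class()/cbq_link_class(): the children
 * of a class form a circular, singly linked ring through ->sibling. A
 * class with no siblings points back at itself, and parent->children is
 * simply one entry of that ring, so unlinking may have to advance it (or
 * clear it when the ring becomes empty), as done above.
 */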

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}
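
/* Minimal configuration sketch, assuming standard iproute2 syntax (the
 * option names below belong to tc(8), not to this file):
 *
 *	tc qdisc add dev eth0 root handle 1: cbq \
 *		bandwidth 100Mbit avpkt 1000
 *	tc class add dev eth0 parent 1: classid 1:1 cbq \
 *		bandwidth 100Mbit rate 10Mbit allot 1500 \
 *		prio 1 avpkt 1000 bounded
 *
 * iproute2 translates these into the TCA_CBQ_* attributes validated
 * against cbq_policy[] below.
 */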

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	q->delay_timer.function = cbq_undelay;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;
	else
		qdisc_hash_add(q->link.q, true);

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_block_put(cl->block);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
		return -EOPNOTSUPP;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	err = tcf_block_get(&cl->block, &cl->filter_list);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	else
		qdisc_hash_add(cl->q, true);

	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen, backlog;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	backlog = cl->q->qstats.backlog;
	qdisc_reset(cl->q);
	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return cl->block;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_block	=	cbq_tcf_block,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");