TOMOYO Linux Cross Reference
Linux/net/sched/sch_tbf.c

/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts at a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

	where L is the maximal backlog (the limit parameter).

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit Ethernet and HZ=100, the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500,
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
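
/* Illustrative sketch (not part of the kernel sources): the dual token
 * bucket above, restated as minimal self-contained userspace C.  Time is
 * kept in nanoseconds of transmit time, the same unit the code below
 * uses; every demo_* name is hypothetical.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	struct demo_tbf {
 *		uint64_t rate_bps, prate_bps;	// R and P, in bytes/sec
 *		int64_t buffer, mtu;	// B and M, as ns of credit at R resp. P
 *		int64_t tokens, ptokens;	// current credit, ns
 *		int64_t t_c;	// last checkpoint, ns
 *	};
 *
 *	// Transmit time of len bytes at rate bytes/sec, in ns.
 *	static int64_t demo_l2t(uint64_t rate, uint32_t len)
 *	{
 *		return (int64_t)(len * 1000000000ULL / rate);
 *	}
 *
 *	// If a packet of len bytes may leave at time now, charge both
 *	// buckets and return true; otherwise leave the state untouched.
 *	static bool demo_account(struct demo_tbf *q, uint32_t len, int64_t now)
 *	{
 *		int64_t toks = q->tokens + (now - q->t_c);
 *		int64_t ptoks = q->ptokens + (now - q->t_c);
 *
 *		if (toks > q->buffer)	// credit saturates at the depth
 *			toks = q->buffer;
 *		if (ptoks > q->mtu)
 *			ptoks = q->mtu;
 *		toks -= demo_l2t(q->rate_bps, len);
 *		ptoks -= demo_l2t(q->prate_bps, len);
 *		if (toks < 0 || ptoks < 0)
 *			return false;	// throttle for max(-toks, -ptoks) ns
 *		q->tokens = toks;
 *		q->ptokens = ptoks;
 *		q->t_c = now;
 *		return true;
 *	}
 */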

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};
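
/* These parameters are normally set from userspace via tc(8); an
 * illustrative invocation (values are examples, not defaults):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * rate feeds q->rate, burst determines q->buffer and q->max_size, and
 * latency is turned by tc into the byte limit of the inner bfifo
 * (q->limit), roughly rate * latency + burst.
 */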


/* Time to Length: convert a time in ns to a length in bytes,
 * to determine how many bytes can be sent in the given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
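
/* Worked example (illustrative numbers): with r->rate_bytes_ps = 125000
 * (1 Mbit/s), time_in_ns = 10000000 (10 ms) gives len = 1250 bytes before
 * the overhead correction.  On ATM link layers the result is scaled by
 * 48/53, because each 53-byte cell carries only 48 bytes of payload.
 */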

/*
 * Return the length of the individual segments of a GSO packet,
 * including all headers (MAC, IP, TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}
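
/* E.g. for a TCP/IPv4 GSO packet on Ethernet (illustrative sizes):
 * hdr_len = 14 (MAC) + 20 (IP) = 34 bytes up to the transport header, and
 * skb_gso_transport_seglen() adds the TCP header plus gso_size, so a
 * standard 1460-byte MSS segment accounts as 34 + 20 + 1460 = 1514 bytes.
 */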

/* The GSO packet is too big; segment it so that tbf can transmit
 * each segment in time.
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
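
/* Accounting note (illustrative): if one GSO skb of prev_len bytes becomes
 * nb = 30 segments whose lengths sum to len > prev_len (the headers are
 * replicated in every segment), qdisc_tree_reduce_backlog() reports
 * 1 - nb = -29 packets and prev_len - len bytes, i.e. it *adds* the extra
 * packets and bytes to the ancestors' backlog counters.
 */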

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
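
/* Timing example (illustrative numbers): at rate 125000 bytes/s, a
 * 1250-byte head packet costs psched_l2t_ns() = 10 ms of tokens.  If only
 * 4 ms of credit has accrued, toks ends up at -6000000 ns, so the watchdog
 * is armed for now + 6 ms, the instant the bucket covers the packet.
 */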

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};
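
/* For reference: TCA_TBF_PARMS carries struct tc_tbf_qopt (declared in
 * include/uapi/linux/pkt_sched.h) - two struct tc_ratespec entries for
 * rate and peakrate, plus limit (bytes) and buffer/mtu (psched ticks).
 * TCA_TBF_RATE64/PRATE64 extend the 32-bit legacy rate fields, and
 * TCA_TBF_BURST/PBURST carry exact byte values that avoid tick rounding.
 */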

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
		if (child != &noop_qdisc)
			qdisc_hash_add(child, true);
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
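
/* Unit example (illustrative numbers): rate 1 Mbyte/s with a 10000-byte
 * TCA_TBF_BURST stores buffer = psched_l2t_ns(&rate, 10000) = 10000000 ns
 * (10 ms); without TCA_TBF_BURST the same budget arrives as qopt->buffer
 * in psched ticks and is widened with PSCHED_TICKS2NS() instead.
 */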

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
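
/* These attributes are what "tc qdisc show" renders; the output shape is
 * roughly (illustrative):
 *
 *	qdisc tbf 8001: root refcnt 2 rate 1Mbit burst 10Kb lat 70.0ms
 */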

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");
