TOMOYO Linux Cross Reference
Linux/net/sched/sch_dsmark.c

/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>

/*
 * classid      class           marking
 * -------      -----           -------
 *   n/a          0             n/a
 *   x:0          1             use entry [0]
 *   ...         ...            ...
 *   x:y y>0     y+1            use entry [y]
 *   ...         ...            ...
 * x:indices-1  indices         use entry [indices-1]
 *   ...         ...            ...
 *   x:y         y+1            use entry [y & (indices-1)]
 *   ...         ...            ...
 * 0xffff       0x10000         use entry [indices-1]
 */
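
/*
 * Worked example (illustrative values, not part of the table above):
 * with indices = 8, a packet whose tc_index ends up as 10 uses entry
 * [10 & (8 - 1)] = entry [2], i.e. the entry configured via classid x:2
 * (class 3).  With the default mask 0xff and value 0x00 that entry
 * rewrites the DS field to (dsfield & 0xff) | 0x00, leaving it as is.
 */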


#define NO_DEFAULT_INDEX        (1 << 16)

struct mask_value {
        u8                      mask;
        u8                      value;
};

struct dsmark_qdisc_data {
        struct Qdisc            *q;
        struct tcf_proto __rcu  *filter_list;
        struct mask_value       *mv;
        u16                     indices;
        u8                      set_tc_index;
        u32                     default_index;  /* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ      16
        struct mask_value       embedded[DSMARK_EMBEDDED_SZ];
};

static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
        return index <= p->indices && index > 0;
}
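
/*
 * Note: the class handles used by the class operations below are
 * 1-based.  dsmark_get() returns TC_H_MIN(classid) + 1, so table entry
 * y is addressed as class y + 1 and 0 is left free to mean "no class";
 * hence the "arg - 1" when indexing p->mv[] and the 1..indices check
 * above.
 */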

/* ------------------------- Class/flow operations ------------------------- */

static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
                        struct Qdisc *new, struct Qdisc **old)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
                 __func__, sch, p, new, old);

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                        sch->handle);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        *old = qdisc_replace(sch, new, &p->q);
        return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        return p->q;
}

static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
        pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
                 __func__, sch, qdisc_priv(sch), classid);

        return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
{
        return dsmark_get(sch, classid);
}

static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}

static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
        [TCA_DSMARK_INDICES]            = { .type = NLA_U16 },
        [TCA_DSMARK_DEFAULT_INDEX]      = { .type = NLA_U16 },
        [TCA_DSMARK_SET_TC_INDEX]       = { .type = NLA_FLAG },
        [TCA_DSMARK_MASK]               = { .type = NLA_U8 },
        [TCA_DSMARK_VALUE]              = { .type = NLA_U8 },
};

static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
                         struct nlattr **tca, unsigned long *arg)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_DSMARK_MAX + 1];
        int err = -EINVAL;

        pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
                 __func__, sch, p, classid, parent, *arg);

        if (!dsmark_valid_index(p, *arg)) {
                err = -ENOENT;
                goto errout;
        }

        if (!opt)
                goto errout;

        err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
        if (err < 0)
                goto errout;

        if (tb[TCA_DSMARK_VALUE])
                p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

        if (tb[TCA_DSMARK_MASK])
                p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

        err = 0;

errout:
        return err;
}

static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        if (!dsmark_valid_index(p, arg))
                return -EINVAL;

        p->mv[arg - 1].mask = 0xff;
        p->mv[arg - 1].value = 0;

        return 0;
}

static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        int i;

        pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
                 __func__, sch, p, walker);

        if (walker->stop)
                return;

        for (i = 0; i < p->indices; i++) {
                if (p->mv[i].mask == 0xff && !p->mv[i].value)
                        goto ignore;
                if (walker->count >= walker->skip) {
                        if (walker->fn(sch, i + 1, walker) < 0) {
                                walker->stop = 1;
                                break;
                        }
                }
ignore:
                walker->count++;
        }
}

static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
                                                       unsigned long cl)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        return &p->filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */

static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        int err;

        pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

        if (p->set_tc_index) {
                int wlen = skb_network_offset(skb);

                switch (tc_skb_protocol(skb)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen) ||
                            skb_try_make_writable(skb, wlen))
                                goto drop;

                        skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
                                & ~INET_ECN_MASK;
                        break;

                case htons(ETH_P_IPV6):
                        wlen += sizeof(struct ipv6hdr);
                        if (!pskb_may_pull(skb, wlen) ||
                            skb_try_make_writable(skb, wlen))
                                goto drop;

                        skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
                                & ~INET_ECN_MASK;
                        break;
                default:
                        skb->tc_index = 0;
                        break;
                }
        }

        if (TC_H_MAJ(skb->priority) == sch->handle)
                skb->tc_index = TC_H_MIN(skb->priority);
        else {
                struct tcf_result res;
                struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
                int result = tc_classify(skb, fl, &res, false);

                pr_debug("result %d class 0x%04x\n", result, res.classid);

                switch (result) {
#ifdef CONFIG_NET_CLS_ACT
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        __qdisc_drop(skb, to_free);
                        return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

                case TC_ACT_SHOT:
                        goto drop;
#endif
                case TC_ACT_OK:
                        skb->tc_index = TC_H_MIN(res.classid);
                        break;

                default:
                        if (p->default_index != NO_DEFAULT_INDEX)
                                skb->tc_index = p->default_index;
                        break;
                }
        }

        err = qdisc_enqueue(skb, p->q, to_free);
        if (err != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(err))
                        qdisc_qstats_drop(sch);
                return err;
        }

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;

drop:
        qdisc_drop(skb, sch, to_free);
        return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
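
/*
 * Summary of how skb->tc_index is chosen above: with set_tc_index the
 * DS field (minus the ECN bits) is copied in first, so an IPv4 packet
 * arriving with TOS 0xb9 starts out with tc_index 0xb8; a packet whose
 * skb->priority carries this qdisc's major handle then overrides that
 * with TC_H_MIN(skb->priority); otherwise the attached classifier
 * decides, and if it returns no usable verdict the configured
 * default_index (if any) is used.
 */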

static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct sk_buff *skb;
        u32 index;

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

        skb = qdisc_dequeue_peeked(p->q);
        if (skb == NULL)
                return NULL;

        qdisc_bstats_update(sch, skb);
        qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;

        index = skb->tc_index & (p->indices - 1);
        pr_debug("index %d->%d\n", skb->tc_index, index);

        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
                                    p->mv[index].value);
                break;
        case htons(ETH_P_IPV6):
                ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
                                    p->mv[index].value);
                break;
        default:
                /*
                 * Only complain if a change was actually attempted.
                 * This way, we can send non-IP traffic through dsmark
                 * and don't need yet another qdisc as a bypass.
                 */
                if (p->mv[index].mask != 0xff || p->mv[index].value)
                        pr_warn("%s: unsupported protocol %d\n",
                                __func__, ntohs(tc_skb_protocol(skb)));
                break;
        }

        return skb;
}
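
/*
 * Remarking example (illustrative mask/value, not taken from this
 * file): an entry configured with mask 0x3 and value 0xb8 keeps only
 * the two ECN bits of the old field and ORs in DSCP EF (46 << 2 =
 * 0xb8), so an IPv4 TOS of 0x11 becomes (0x11 & 0x03) | 0xb8 = 0xb9.
 * The default entry (mask 0xff, value 0x00) leaves the field untouched.
 */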

static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

        return p->q->ops->peek(p->q);
}

static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *tb[TCA_DSMARK_MAX + 1];
        int err = -EINVAL;
        u32 default_index = NO_DEFAULT_INDEX;
        u16 indices;
        int i;

        pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

        if (!opt)
                goto errout;

        err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
        if (err < 0)
                goto errout;

        err = -EINVAL;

        /* TCA_DSMARK_INDICES is mandatory; reject a config without it */
        if (!tb[TCA_DSMARK_INDICES])
                goto errout;
        indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

        if (hweight32(indices) != 1)
                goto errout;

        if (tb[TCA_DSMARK_DEFAULT_INDEX])
                default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

        if (indices <= DSMARK_EMBEDDED_SZ)
                p->mv = p->embedded;
        else
                p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
        if (!p->mv) {
                err = -ENOMEM;
                goto errout;
        }
        for (i = 0; i < indices; i++) {
                p->mv[i].mask = 0xff;
                p->mv[i].value = 0;
        }
        p->indices = indices;
        p->default_index = default_index;
        p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

        p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
        if (p->q == NULL)
                p->q = &noop_qdisc;

        pr_debug("%s: qdisc %p\n", __func__, p->q);

        err = 0;
errout:
        return err;
}
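
/*
 * The hweight32(indices) != 1 test in dsmark_init() accepts only a
 * non-zero power of two, e.g. indices = 64 passes while 0 or 48 do not.
 * That is what allows enqueue/dequeue to reduce skb->tc_index with
 * "& (p->indices - 1)" instead of a range check.
 */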

static void dsmark_reset(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
        qdisc_reset(p->q);
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
}

static void dsmark_destroy(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

        tcf_destroy_chain(&p->filter_list);
        qdisc_destroy(p->q);
        if (p->mv != p->embedded)
                kfree(p->mv);
}

static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opts = NULL;

        pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);

        if (!dsmark_valid_index(p, cl))
                return -EINVAL;

        tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
        tcm->tcm_info = p->q->handle;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
            nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opts = NULL;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
                goto nla_put_failure;

        if (p->default_index != NO_DEFAULT_INDEX &&
            nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
                goto nla_put_failure;

        if (p->set_tc_index &&
            nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static const struct Qdisc_class_ops dsmark_class_ops = {
        .graft          =       dsmark_graft,
        .leaf           =       dsmark_leaf,
        .get            =       dsmark_get,
        .put            =       dsmark_put,
        .change         =       dsmark_change,
        .delete         =       dsmark_delete,
        .walk           =       dsmark_walk,
        .tcf_chain      =       dsmark_find_tcf,
        .bind_tcf       =       dsmark_bind_filter,
        .unbind_tcf     =       dsmark_put,
        .dump           =       dsmark_dump_class,
};

static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
        .next           =       NULL,
        .cl_ops         =       &dsmark_class_ops,
        .id             =       "dsmark",
        .priv_size      =       sizeof(struct dsmark_qdisc_data),
        .enqueue        =       dsmark_enqueue,
        .dequeue        =       dsmark_dequeue,
        .peek           =       dsmark_peek,
        .init           =       dsmark_init,
        .reset          =       dsmark_reset,
        .destroy        =       dsmark_destroy,
        .change         =       NULL,
        .dump           =       dsmark_dump,
        .owner          =       THIS_MODULE,
};

static int __init dsmark_module_init(void)
{
        return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
        unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");
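
/*
 * Minimal usage sketch (illustrative iproute2 commands, assuming the
 * standard "tc ... dsmark" syntax; not part of this file):
 *
 *   # attach dsmark as root qdisc with 64 table entries and copy the
 *   # DS field into skb->tc_index on enqueue
 *   tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 set_tc_index
 *
 *   # entry [1] (classid 1:1): keep the ECN bits, remark to DSCP EF
 *   tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * Packets whose tc_index & 63 equals 1 are then remarked on dequeue as
 * in dsmark_dequeue() above.
 */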
