/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (nla_strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register/unregister a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  unsigned long fh, int event);


/* Select new prio value from the range, managed by kernel. */
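/* If @tp is the filter that will follow the newly created one in the
 * chain, the auto-selected priority is tp->prio - 1; for an empty chain
 * (tp == NULL) it falls back to TC_H_MAKE(0xC0000000U, 0U).
 */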

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return first;
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 nprio;
	u32 parent;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto __rcu **back;
	struct tcf_proto __rcu **chain;
	struct tcf_proto *tp;
	const struct tcf_proto_ops *tp_ops;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	unsigned long fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	nprio = prio;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is given, the user wants us to allocate it. */
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			return -ENOENT;
		prio = TC_H_MAKE(0x80000000U, 0U);
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (cops->tcf_chain == NULL)
		return -EOPNOTSUPP;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	chain = cops->tcf_chain(q, cl);
	err = -EINVAL;
	if (chain == NULL)
		goto errout;

	/* Check the chain for existence of proto-tcf with this priority */
	for (back = chain;
	     (tp = rtnl_dereference(*back)) != NULL;
	     back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (!nprio ||
				    (tp->protocol != protocol && protocol))
					goto errout;
			} else
				tp = NULL;
			break;
		}
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol)
			goto errout;

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;


		/* Create new proto tcf */

		err = -ENOBUFS;
		tp = kzalloc(sizeof(*tp), GFP_KERNEL);
		if (tp == NULL)
			goto errout;
		err = -ENOENT;
		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]);
		if (tp_ops == NULL) {
#ifdef CONFIG_MODULES
			struct nlattr *kind = tca[TCA_KIND];
			char name[IFNAMSIZ];

			if (kind != NULL &&
			    nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
				rtnl_unlock();
				request_module("cls_%s", name);
				rtnl_lock();
				tp_ops = tcf_proto_lookup_ops(kind);
				/* We dropped the RTNL semaphore in order to
				 * perform the module load. So, even if we
				 * succeeded in loading the module we have to
				 * replay the request. We indicate this using
				 * -EAGAIN.
				 */
				if (tp_ops != NULL) {
					module_put(tp_ops->owner);
					err = -EAGAIN;
				}
			}
#endif
			kfree(tp);
			goto errout;
		}
		tp->ops = tp_ops;
		tp->protocol = protocol;
		tp->prio = nprio ? :
			       TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
		tp->q = q;
		tp->classify = tp_ops->classify;
		tp->classid = parent;

		err = tp_ops->init(tp);
		if (err != 0) {
			module_put(tp_ops->owner);
			kfree(tp);
			goto errout;
		}

		tp_created = 1;

	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
		goto errout;

	fh = tp->ops->get(tp, t->tcm_handle);

	if (fh == 0) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			struct tcf_proto *next = rtnl_dereference(tp->next);

			RCU_INIT_POINTER(*back, next);

			tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
			tcf_destroy(tp, true);
			err = 0;
			goto errout;
		}

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_destroy(tp, true);
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tp->ops->delete(tp, fh);
			if (err == 0) {
				struct tcf_proto *next = rtnl_dereference(tp->next);

				tfilter_notify(net, skb, n, tp,
					       t->tcm_handle, RTM_DELTFILTER);
				if (tcf_destroy(tp, false))
					RCU_INIT_POINTER(*back, next);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created) {
			RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
			rcu_assign_pointer(*back, tp);
		}
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
	} else {
		if (tp_created)
			tcf_destroy(tp, true);
	}

errout:
	if (cl)
		cops->put(q, cl);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb, struct tcf_proto *tp,
			 unsigned long fh, u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	tcm->tcm_handle = fh;
	if (RTM_DELTFILTER != event) {
		tcm->tcm_handle = 0;
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  unsigned long fh, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
			 struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto *tp, __rcu **chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	struct tcf_dump_args arg;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;
	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto errout;
	if (cops->tcf_chain == NULL)
		goto errout;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->get(q, tcm->tcm_parent);
		if (cl == 0)
			goto errout;
	}
	chain = cops->tcf_chain(q, cl);
	if (chain == NULL)
		goto errout;

	s_t = cb->args[0];

	for (tp = rtnl_dereference(*chain), t = 0;
	     tp; tp = rtnl_dereference(tp->next), t++) {
		if (t < s_t)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
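			/* cb->args[1] == 0 means this filter has not been
			 * visited yet in this dump pass: emit one record for
			 * the tcf_proto itself (handle 0) before walking its
			 * elements below.
			 */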
			if (tcf_fill_node(net, skb, tp, 0, NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				break;

			cb->args[1] = 1;
		}
		if (tp->ops->walk == NULL)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			break;
	}

	cb->args[0] = t;

errout:
	if (cl)
		cops->put(q, cl);
out:
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND);
	INIT_LIST_HEAD(&exts->actions);
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		INIT_LIST_HEAD(&exts->actions);
		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
						"police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			list_add(&act->list, &exts->actions);
		} else if (exts->action && tb[exts->action]) {
			int err;
			err = tcf_action_init(net, tb[exts->action], rate_tlv,
					      NULL, ovr,
					      TCA_ACT_BIND, &exts->actions);
			if (err)
				return err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(tmp);
	tcf_tree_lock(tp);
	list_splice_init(&dst->actions, &tmp);
	list_splice(&src->actions, &dst->actions);
	dst->type = src->type;
	tcf_tree_unlock(tp);
	tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#define tcf_exts_first_act(ext) \
		list_first_entry_or_null(&(ext)->actions, \
					 struct tc_action, list)

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && !list_empty(&exts->actions)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;
			if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);
			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);


int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);
	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, NULL);

	return 0;
}

subsys_initcall(tc_filter_init);