/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/*
   1. For now we assume that route tags < 256.
      It allows to use direct table lookups, instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */

/*
 * One slot of the per-head result cache: remembers the filter that
 * last matched a given (route tag, input interface) pair, or
 * ROUTE4_FAILURE to cache a lookup miss.
 */
struct route4_fastmap
{
	struct route4_filter	*filter;	/* cached match, or ROUTE4_FAILURE */
	u32			id;		/* route tag (dst->tclassid) of this entry */
	int			iif;		/* input interface of this entry */
};

/*
 * Classifier root.  table[0..255] is indexed by the low 8 bits of the
 * "to" route tag; table[256] holds filters whose key carries no "to"
 * tag (handle bit 15 set -- see to_hash()).
 */
struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

/*
 * Second hash level inside a bucket: chains 0..15 are keyed by the
 * "from" tag, 16..31 by the input interface, and 32 holds fully
 * wildcarded "from" filters (see route4_hash_from/_iif/_wild()).
 */
struct route4_bucket
{
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;	/* hash-chain linkage, kept sorted by handle */
	u32			id;	/* combined to|from route tag to match */
	int			iif;	/* input interface to match (fromdev) */

	struct tcf_result	res;	/* classification result returned on match */
#ifdef CONFIG_NET_CLS_POLICE
	struct tcf_police	*police;	/* optional policer run before accepting */
#endif

	u32			handle;	/* user-visible handle; encodes to/from/iif */
	struct route4_bucket	*bkt;	/* back pointer to the owning bucket */
};

/* Sentinel stored in the fastmap to cache a negative lookup result. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

/* Fastmap slot for a (tag, iif) pair; only the tag's low bits are used. */
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}

/*
 * Invalidate the entire result cache; called after every filter
 * insertion or deletion.  dev->queue_lock serialises this against
 * concurrent classification (NOTE(review): 'id' is unused -- the
 * whole cache is wiped, not just the matching slot).
 */
static void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}

/* Record a lookup result (a filter or ROUTE4_FAILURE) in the cache. */
static void __inline__
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

/* First-level hash: low 8 bits of the "to" route tag. */
static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

/* Second-level chain for "from TAG" lookups: bits 16..19 of the tag. */
static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

/* Second-level chain for "fromdev DEV" lookups: slots 16..31, keyed
 * on bits 16..19 of the interface index. */
static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

/* Slot 32: filters with a fully wildcarded "from" part. */
static __inline__ int route4_hash_wild(void)
{
	return 32;
}

/*
 * Run the filter's policer (if any) before accepting a match: a
 * non-negative policer verdict is returned to the caller as-is; a
 * negative one skips this filter and disables fastmap caching for
 * the rest of the lookup.  Expands to nothing without policing, so
 * the fastmap update that follows it always executes.
 */
#ifdef CONFIG_NET_CLS_POLICE
#define IF_ROUTE_POLICE \
if (f->police) { \
	int pol_res = tcf_police(skb, f->police); \
	if (pol_res >= 0) return pol_res; \
	dont_cache = 1; \
	continue; \
} \
if (!dont_cache)
#else
#define IF_ROUTE_POLICE
#endif


/*
 * Classify a packet by the routing tag (dst->tclassid) and input
 * interface of its attached route.  Returns 0 with *res filled in on
 * a match, a policer verdict if a policer intervenes, or -1 when no
 * filter matches (or the skb has no route).
 */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
#ifdef CONFIG_NET_CLS_POLICE
	int dont_cache = 0;	/* set by IF_ROUTE_POLICE on a policed drop */
#endif
	u32 id, h;
	int iif;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	/* Fast path: consult the result cache first. */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		/* Most specific first: "to TAG from TAG". */
		f = b->ht[route4_hash_from(id)];

		for ( ; f; f = f->next) {
			if (f->id == id) {
				*res = f->res;
				IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
				return 0;
			}
		}

		/* Then "to TAG fromdev DEV". */
		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next) {
			if (f->iif == iif) {
				*res = f->res;
				IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
				return 0;
			}
		}

		/* Finally "to TAG from ANY" (rule 3 above). */
		for (f = b->ht[route4_hash_wild()]; f; f = f->next) {
			*res = f->res;
			IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
			return 0;
		}

	}
	/* Second pass over the "to ANY" bucket (table[256]) with the
	 * low 16 tag bits masked off. */
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

#ifdef CONFIG_NET_CLS_POLICE
	if (!dont_cache)
#endif
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	/* No filters installed: interpret the route tag directly as a
	 * classid, provided its major part matches this qdisc (or is 0). */
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

/*
 * Map a filter handle to its first-level table index.  Handle bit 15
 * set means "no 'to' tag", selecting the extra slot table[256].
 */
static u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

/*
 * Map the high half of a handle to its second-level chain: 0..15 for
 * "from TAG", 16..31 for "fromdev DEV" (bit 15 set), 32 for the
 * wildcard value 0xFFFF.  Returns 256 for an out-of-range "from" tag
 * so callers can reject it (route4_get checks h2 > 32).
 */
static u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}

/*
 * Look a filter up by handle; returns it as an opaque cookie, or 0.
 * No reference is taken (see route4_put below).
 */
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

/* Nothing to release: route4_get() does not take a reference. */
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

/* Root table is allocated lazily in route4_change(); nothing to do. */
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

/*
 * Tear the classifier down: detach the root atomically, then unbind
 * every filter's class, release its policer, and free all filters,
 * buckets and finally the head itself.
 */
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					unsigned long cl;

					b->ht[h2] = f->next;
					if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
						tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
					tcf_police_release(f->police);
#endif
					kfree(f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

/*
 * Delete one filter ('arg' is the cookie from get()/change()).  The
 * unlink happens under tcf_tree_lock; the fastmap is flushed before
 * the filter memory is freed.  If the bucket becomes empty it is
 * unlinked and freed as well.
 */
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			unsigned long cl;

			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);

			if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
				tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);

#ifdef CONFIG_NET_CLS_POLICE
			tcf_police_release(f->police);
#endif
			kfree(f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

/*
 * Create a new filter, or update an existing one (*arg != 0, in which
 * case only the classid/policer may change).  The handle encodes the
 * match key: low byte = "to" tag, bits 16..31 = "from" tag, or
 * 0x8000|iif for "fromdev", or 0xFFFF for a wildcard "from"; handle
 * bit 15 set means "no 'to' tag".
 */
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **ins_f;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned h1, h2;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse(tb, TCA_ROUTE4_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		/* Node exists: adjust only classid */

		if (f->handle != handle && handle)
			return -EINVAL;
		if (tb[TCA_ROUTE4_CLASSID-1]) {
			unsigned long cl;

			/* Bind the new class first, then unbind the old one. */
			f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
			cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
			if (cl)
				tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
		}
#ifdef CONFIG_NET_CLS_POLICE
		if (tb[TCA_ROUTE4_POLICE-1]) {
			/* Swap in the new policer under the tree lock and
			 * release the old one afterwards. */
			struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);

			tcf_tree_lock(tp);
			police = xchg(&f->police, police);
			tcf_tree_unlock(tp);

			tcf_police_release(police);
		}
#endif
		return 0;
	}

	/* Now more serious part... */

	/* Allocate the root table lazily on the first insertion. */
	if (head == NULL) {
		head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;
		memset(head, 0, sizeof(struct route4_head));

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		return -ENOBUFS;

	memset(f, 0, sizeof(*f));

	err = -EINVAL;
	f->handle = 0x8000;	/* bit 15 = "no 'to' tag"; overwritten below if TO given */
	if (tb[TCA_ROUTE4_TO-1]) {
		if (handle&0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < 4)
			goto errout;
		f->id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (f->id > 0xFF)
			goto errout;
		f->handle = f->id;
	}
	if (tb[TCA_ROUTE4_FROM-1]) {
		u32 sid;
		/* "from TAG" and "fromdev DEV" are mutually exclusive
		 * (rule 2 in the file-top comment). */
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < 4)
			goto errout;
		sid = (*(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]));
		if (sid > 0xFF)
			goto errout;
		f->handle |= sid<<16;
		f->id |= sid<<16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < 4)
			goto errout;
		f->iif = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (f->iif > 0x7FFF)
			goto errout;
		f->handle |= (f->iif|0x8000)<<16;
	} else
		f->handle |= 0xFFFF<<16;	/* wildcard "from" */

	if (handle) {
		/* A caller-supplied handle must be consistent with the
		 * match attributes; only bits 8..14 may add information. */
		f->handle |= handle&0x7F00;
		if (f->handle != handle)
			goto errout;
	}

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < 4)
			goto errout;
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
	}

	h1 = to_hash(f->handle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;
		memset(b, 0, sizeof(*b));

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	}
	f->bkt = b;

	err = -EEXIST;
	h2 = from_hash(f->handle>>16);
	/* Chains are kept sorted by handle; duplicates are rejected. */
	for (ins_f = &b->ht[h2]; (f1=*ins_f) != NULL; ins_f = &f1->next) {
		if (f->handle < f1->handle)
			break;
		if (f1->handle == f->handle)
			goto errout;
	}

	cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
#ifdef CONFIG_NET_CLS_POLICE
	if (tb[TCA_ROUTE4_POLICE-1])
		f->police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
#endif

	/* Publish the fully initialised filter under the tree lock,
	 * then flush the stale result cache. */
	f->next = f1;
	tcf_tree_lock(tp);
	*ins_f = f;
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	if (f)
		kfree(f);
	return err;
}

/* Iterate over every installed filter, honouring skip/count/stop in
 * the walker as the other classifiers do. */
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						break;
					}
					arg->count++;
				}
			}
		}
	}
}

/*
 * Dump one filter's configuration to userspace as a nested TCA_OPTIONS
 * rtattr.  Returns skb->len on success, -1 (after trimming the skb
 * back to its starting length) when the skb runs out of room.
 */
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb->tail;	/* rollback point for rtattr_failure */
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	/* Handle bit 15 clear => a "to" tag was configured. */
	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	/* Handle bit 31 set => "fromdev" form (unless wildcard 0xFFFF);
	 * otherwise the upper handle half is a "from" tag. */
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
#ifdef CONFIG_NET_CLS_POLICE
	if (f->police) {
		/* Nest the policer attributes and patch the length after. */
		struct rtattr * p_rta = (struct rtattr*)skb->tail;

		RTA_PUT(skb, TCA_ROUTE4_POLICE, 0, NULL);

		if (tcf_police_dump(skb, f->police) < 0)
			goto rtattr_failure;

		p_rta->rta_len = skb->tail - (u8*)p_rta;
	}
#endif

	rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
	if (f->police) {
		if (qdisc_copy_stats(skb, &f->police->stats))
			goto rtattr_failure;
	}
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

#ifdef MODULE
int init_module(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

void cleanup_module(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
#endif
MODULE_LICENSE("GPL");
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.