// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     That would result in deadlocks if the backend/driver wanted to use
     the neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
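
/* Illustrative sketch of the rule above (editorial example, not upstream
 * code): to do real work on an entry found while scanning a bucket, take
 * a reference under tbl->lock, drop the lock, then act:
 *
 *	write_lock_bh(&tbl->lock);
 *	n = ...;			// found during a bucket scan
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	// now it is safe to probe, notify, or call into the driver
 *	neigh_release(n);
 */
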
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
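
/* Worked check of the range (editorial note): (prandom_u32() % base)
 * ranges over [0, base), and (base >> 1) shifts it by base/2, so the
 * result lies in [base/2, 3*base/2), i.e. (1/2)*base ... (3/2)*base as
 * the comment above states.
 */
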
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
			       bool *gc_update, bool *managed_update)
{
	u32 ndm_flags, old_flags = neigh->flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return;

	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		*notify = 1;
		*gc_update = true;
	}
	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
		if (ndm_flags & NTF_MANAGED)
			neigh->flags |= NTF_MANAGED;
		else
			neigh->flags &= ~NTF_MANAGED;
		*notify = 1;
		*managed_update = true;
	}
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_parms_qlen_dec(struct net_device *dev, int family)
{
	struct neigh_parms *p;

	rcu_read_lock();
	p = neigh_get_dev_parms_rcu(dev, family);
	if (p)
		p->qlen--;
	rcu_read_unlock();
}

static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
			       int family)
{
	struct sk_buff_head tmp;
	unsigned long flags;
	struct sk_buff *skb;

	skb_queue_head_init(&tmp);
	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);
	while (skb != NULL) {
		struct sk_buff *skb_next = skb_peek_next(skb, list);
		struct net_device *dev = skb->dev;

		if (net == NULL || net_eq(dev_net(dev), net)) {
			neigh_parms_qlen_dec(dev, family);
			__skb_unlink(skb, list);
			__skb_queue_tail(&tmp, skb);
		}
		skb = skb_next;
	}
	spin_unlock_irqrestore(&list->lock, flags);

	while ((skb = __skb_dequeue(&tmp))) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				 * we must destroy the neighbour entry,
				 * but someone still uses it.
				 *
				 * The destroy will be delayed until
				 * the last user releases us, but
				 * we must kill timers etc. and move
				 * it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);
	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
			   tbl->family);
	if (skb_queue_empty_lockless(&tbl->proxy_queue))
		del_timer_sync(&tbl->proxy_timer);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
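
/* Editorial note on the thresholds used by neigh_alloc() above: with the
 * usual ARP defaults gc_thresh2 = 512 and gc_thresh3 = 1024 (those values
 * live in the per-protocol tables, not in this file), an allocation that
 * brings the table to, say, 600 GC-able entries only triggers a forced GC
 * if the last flush was more than 5 seconds ago; at 1024 or more it always
 * tries, and the allocation fails if nothing could be reclaimed.
 */
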
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
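
/* Editorial note: the bucket index is taken from the *top* bits of the
 * 32-bit hash, hence "hash >>= (32 - hash_shift)" above. For example,
 * with hash_shift == 3 (8 buckets), 0xDEADBEEF >> 29 == 6, so that entry
 * lands in bucket 6. Because the index is just the high bits, the same
 * 32-bit hash value can be reused unchanged as the table grows.
 */
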
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
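
/* Editorial note on pneigh_hash() above: the last 4 bytes of the key are
 * XOR-folded (16-, 8-, then 4-bit shifts) and masked down to 4 bits, so
 * the proxy hash always has PNEIGH_HASHMASK + 1 == 16 buckets, however
 * long the protocol key is.
 */
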
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		netdev_put(dev, &n->dev_tracker);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);

int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			netdev_put(n->dev, &n->dev_tracker);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		netdev_put(n->dev, &n->dev_tracker);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	netdev_put(dev, &neigh->dev_tracker);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
 * disable fast path.
 *
 * Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
 * enable fast path.
 *
 * Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;

		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;

	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very thin place. report_unreachable is a very
	 * complicated routine. In particular, it can hit the same
	 * neighbour entry!
	 *
	 * So we try to be careful and avoid an endless loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);

	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */
static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
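
/* Editorial summary of the transitions driven by neigh_timer_handler()
 * above (the NUD_* states come from the neighbour uAPI header):
 *
 *	REACHABLE --(confirmed aged out, recently used)--> DELAY
 *	REACHABLE --(confirmed and used both aged out)---> STALE
 *	DELAY     --(confirmation arrived in time)-------> REACHABLE
 *	DELAY     --(no confirmation)--------------------> PROBE
 *	PROBE/INCOMPLETE --(probe budget exhausted)------> FAILED
 */
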
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		       const bool immediate_ok)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			if (!immediate_ok) {
				next = now + 1;
			} else {
				immediate_probe = true;
				next = now + max(NEIGH_VAR(neigh->parms,
							   RETRANS_TIME),
						 HZ / 100);
			}
			neigh_add_timer(neigh, next);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
 * -- lladdr is the new lladdr, or NULL if it is not supplied.
 * -- new    is the new state.
 * -- flags
 *	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
 *				if it is different.
 *	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
 *				lladdr instead of overriding it
 *				if it is different.
 *	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
 *	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
 *	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
 *	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
 *				NTF_ROUTER flag.
 *	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
 *				a router.
 *
 * Caller MUST hold a reference count on the entry.
 */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool gc_update = false, managed_update = false;
	int update_isrouter = 0;
	struct net_device *dev;
	int err, notify = 0;
	u8 old;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		 * and a new address is proposed:
		 * - compare new & old
		 * - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		 * use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	 * do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */
		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;

			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);
	if (((new ^ old) & NUD_PERMANENT) || gc_update)
		neigh_update_gc_list(neigh);
	if (managed_update)
		neigh_update_managed_list(neigh);
	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);
	trace_neigh_update_done(neigh, err);
	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
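
/* Editorial example of the override rules in __neigh_update() above: an
 * ARP reply carrying a *different* lladdr for an entry in NUD_REACHABLE,
 * with only NEIGH_UPDATE_F_WEAK_OVERRIDE set, does not rewrite the cached
 * address; the entry keeps the old lladdr and merely drops to NUD_STALE,
 * so the new address has to be confirmed before it is trusted.
 */
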
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);

	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
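
/* Editorial note on the retry loops above: neigh->ha_lock is a seqlock,
 * so the output paths copy the link-layer address into the frame header
 * optimistically and retry only if a writer (neigh_update()) changed
 * neigh->ha mid-copy. Readers never block the update path.
 */
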
static void neigh_managed_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table,
					       managed_work.work);
	struct neighbour *neigh;

	write_lock_bh(&tbl->lock);
	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
		neigh_event_send_probe(neigh, NULL, false);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
	write_unlock_bh(&tbl->lock);
}

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			neigh_parms_qlen_dec(dev, tbl->family);
			__skb_unlink(skb, &tbl->proxy_queue);

			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = jiffies +
			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));

	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	p->qlen++;
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
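
/* Editorial note on pneigh_enqueue() above: each proxied request is held
 * for a uniformly random delay in [0, PROXY_DELAY) before being replayed
 * via proxy_redo. The jitter gives the real host a chance to answer
 * before the proxy does, and the timer is re-armed to the earliest
 * pending deadline if one is already queued.
 */
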
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		p->qlen = 0;
		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			netdev_put(dev, &p->dev_tracker);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	netdev_put(parms->dev, &parms->dev_tracker);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
	tbl->parms.qlen = 0;

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
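
/* Editorial note on neigh_table_init() above: the main hash starts with
 * shift 3 (8 buckets) and ___neigh_create() doubles it whenever the entry
 * count exceeds the bucket count, keeping the load factor around 1. The
 * proxy hash, by contrast, is fixed at PNEIGH_HASHMASK + 1 buckets.
 */
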
Fix it to unload IPv6 module safely */ 1835 cancel_delayed_work_sync(&tbl->managed_work); 1836 cancel_delayed_work_sync(&tbl->gc_work); 1837 del_timer_sync(&tbl->proxy_timer); 1838 pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family); 1839 neigh_ifdown(tbl, NULL); 1840 if (atomic_read(&tbl->entries)) 1841 pr_crit("neighbour leakage\n"); 1842 1843 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu, 1844 neigh_hash_free_rcu); 1845 tbl->nht = NULL; 1846 1847 kfree(tbl->phash_buckets); 1848 tbl->phash_buckets = NULL; 1849 1850 remove_proc_entry(tbl->id, init_net.proc_net_stat); 1851 1852 free_percpu(tbl->stats); 1853 tbl->stats = NULL; 1854 1855 return 0; 1856 } 1857 EXPORT_SYMBOL(neigh_table_clear); 1858 1859 static struct neigh_table *neigh_find_table(int family) 1860 { 1861 struct neigh_table *tbl = NULL; 1862 1863 switch (family) { 1864 case AF_INET: 1865 tbl = neigh_tables[NEIGH_ARP_TABLE]; 1866 break; 1867 case AF_INET6: 1868 tbl = neigh_tables[NEIGH_ND_TABLE]; 1869 break; 1870 case AF_DECnet: 1871 tbl = neigh_tables[NEIGH_DN_TABLE]; 1872 break; 1873 } 1874 1875 return tbl; 1876 } 1877 1878 const struct nla_policy nda_policy[NDA_MAX+1] = { 1879 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID }, 1880 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1881 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1882 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) }, 1883 [NDA_PROBES] = { .type = NLA_U32 }, 1884 [NDA_VLAN] = { .type = NLA_U16 }, 1885 [NDA_PORT] = { .type = NLA_U16 }, 1886 [NDA_VNI] = { .type = NLA_U32 }, 1887 [NDA_IFINDEX] = { .type = NLA_U32 }, 1888 [NDA_MASTER] = { .type = NLA_U32 }, 1889 [NDA_PROTOCOL] = { .type = NLA_U8 }, 1890 [NDA_NH_ID] = { .type = NLA_U32 }, 1891 [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK), 1892 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED }, 1893 }; 1894 1895 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, 1896 struct netlink_ext_ack *extack) 1897 { 1898 struct net *net = sock_net(skb->sk); 1899 struct ndmsg *ndm; 1900 struct nlattr *dst_attr; 1901 struct neigh_table *tbl; 1902 struct neighbour *neigh; 1903 struct net_device *dev = NULL; 1904 int err = -EINVAL; 1905 1906 ASSERT_RTNL(); 1907 if (nlmsg_len(nlh) < sizeof(*ndm)) 1908 goto out; 1909 1910 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST); 1911 if (!dst_attr) { 1912 NL_SET_ERR_MSG(extack, "Network address not specified"); 1913 goto out; 1914 } 1915 1916 ndm = nlmsg_data(nlh); 1917 if (ndm->ndm_ifindex) { 1918 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 1919 if (dev == NULL) { 1920 err = -ENODEV; 1921 goto out; 1922 } 1923 } 1924 1925 tbl = neigh_find_table(ndm->ndm_family); 1926 if (tbl == NULL) 1927 return -EAFNOSUPPORT; 1928 1929 if (nla_len(dst_attr) < (int)tbl->key_len) { 1930 NL_SET_ERR_MSG(extack, "Invalid network address"); 1931 goto out; 1932 } 1933 1934 if (ndm->ndm_flags & NTF_PROXY) { 1935 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); 1936 goto out; 1937 } 1938 1939 if (dev == NULL) 1940 goto out; 1941 1942 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); 1943 if (neigh == NULL) { 1944 err = -ENOENT; 1945 goto out; 1946 } 1947 1948 err = __neigh_update(neigh, NULL, NUD_FAILED, 1949 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 1950 NETLINK_CB(skb).portid, extack); 1951 write_lock_bh(&tbl->lock); 1952 neigh_release(neigh); 1953 neigh_remove_one(neigh, tbl); 1954 write_unlock_bh(&tbl->lock); 1955 1956 out: 1957 return err; 1958 } 1959 1960 static int neigh_add(struct sk_buff *skb, struct nlmsghdr 
*nlh, 1961 struct netlink_ext_ack *extack) 1962 { 1963 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE | 1964 NEIGH_UPDATE_F_OVERRIDE_ISROUTER; 1965 struct net *net = sock_net(skb->sk); 1966 struct ndmsg *ndm; 1967 struct nlattr *tb[NDA_MAX+1]; 1968 struct neigh_table *tbl; 1969 struct net_device *dev = NULL; 1970 struct neighbour *neigh; 1971 void *dst, *lladdr; 1972 u8 protocol = 0; 1973 u32 ndm_flags; 1974 int err; 1975 1976 ASSERT_RTNL(); 1977 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 1978 nda_policy, extack); 1979 if (err < 0) 1980 goto out; 1981 1982 err = -EINVAL; 1983 if (!tb[NDA_DST]) { 1984 NL_SET_ERR_MSG(extack, "Network address not specified"); 1985 goto out; 1986 } 1987 1988 ndm = nlmsg_data(nlh); 1989 ndm_flags = ndm->ndm_flags; 1990 if (tb[NDA_FLAGS_EXT]) { 1991 u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]); 1992 1993 BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE < 1994 (sizeof(ndm->ndm_flags) * BITS_PER_BYTE + 1995 hweight32(NTF_EXT_MASK))); 1996 ndm_flags |= (ext << NTF_EXT_SHIFT); 1997 } 1998 if (ndm->ndm_ifindex) { 1999 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 2000 if (dev == NULL) { 2001 err = -ENODEV; 2002 goto out; 2003 } 2004 2005 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) { 2006 NL_SET_ERR_MSG(extack, "Invalid link address"); 2007 goto out; 2008 } 2009 } 2010 2011 tbl = neigh_find_table(ndm->ndm_family); 2012 if (tbl == NULL) 2013 return -EAFNOSUPPORT; 2014 2015 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) { 2016 NL_SET_ERR_MSG(extack, "Invalid network address"); 2017 goto out; 2018 } 2019 2020 dst = nla_data(tb[NDA_DST]); 2021 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; 2022 2023 if (tb[NDA_PROTOCOL]) 2024 protocol = nla_get_u8(tb[NDA_PROTOCOL]); 2025 if (ndm_flags & NTF_PROXY) { 2026 struct pneigh_entry *pn; 2027 2028 if (ndm_flags & NTF_MANAGED) { 2029 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination"); 2030 goto out; 2031 } 2032 2033 err = -ENOBUFS; 2034 pn = pneigh_lookup(tbl, net, dst, dev, 1); 2035 if (pn) { 2036 pn->flags = ndm_flags; 2037 if (protocol) 2038 pn->protocol = protocol; 2039 err = 0; 2040 } 2041 goto out; 2042 } 2043 2044 if (!dev) { 2045 NL_SET_ERR_MSG(extack, "Device not specified"); 2046 goto out; 2047 } 2048 2049 if (tbl->allow_add && !tbl->allow_add(dev, extack)) { 2050 err = -EINVAL; 2051 goto out; 2052 } 2053 2054 neigh = neigh_lookup(tbl, dst, dev); 2055 if (neigh == NULL) { 2056 bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT; 2057 bool exempt_from_gc = ndm_permanent || 2058 ndm_flags & NTF_EXT_LEARNED; 2059 2060 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 2061 err = -ENOENT; 2062 goto out; 2063 } 2064 if (ndm_permanent && (ndm_flags & NTF_MANAGED)) { 2065 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry"); 2066 err = -EINVAL; 2067 goto out; 2068 } 2069 2070 neigh = ___neigh_create(tbl, dst, dev, 2071 ndm_flags & 2072 (NTF_EXT_LEARNED | NTF_MANAGED), 2073 exempt_from_gc, true); 2074 if (IS_ERR(neigh)) { 2075 err = PTR_ERR(neigh); 2076 goto out; 2077 } 2078 } else { 2079 if (nlh->nlmsg_flags & NLM_F_EXCL) { 2080 err = -EEXIST; 2081 neigh_release(neigh); 2082 goto out; 2083 } 2084 2085 if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) 2086 flags &= ~(NEIGH_UPDATE_F_OVERRIDE | 2087 NEIGH_UPDATE_F_OVERRIDE_ISROUTER); 2088 } 2089 2090 if (protocol) 2091 neigh->protocol = protocol; 2092 if (ndm_flags & NTF_EXT_LEARNED) 2093 flags |= NEIGH_UPDATE_F_EXT_LEARNED; 2094 if (ndm_flags & NTF_ROUTER) 2095 flags |= NEIGH_UPDATE_F_ISROUTER; 2096 if (ndm_flags & 
NTF_MANAGED) 2097 flags |= NEIGH_UPDATE_F_MANAGED; 2098 if (ndm_flags & NTF_USE) 2099 flags |= NEIGH_UPDATE_F_USE; 2100 2101 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, 2102 NETLINK_CB(skb).portid, extack); 2103 if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) { 2104 neigh_event_send(neigh, NULL); 2105 err = 0; 2106 } 2107 neigh_release(neigh); 2108 out: 2109 return err; 2110 } 2111 2112 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) 2113 { 2114 struct nlattr *nest; 2115 2116 nest = nla_nest_start_noflag(skb, NDTA_PARMS); 2117 if (nest == NULL) 2118 return -ENOBUFS; 2119 2120 if ((parms->dev && 2121 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) || 2122 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) || 2123 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, 2124 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) || 2125 /* approximative value for deprecated QUEUE_LEN (in packets) */ 2126 nla_put_u32(skb, NDTPA_QUEUE_LEN, 2127 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) || 2128 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) || 2129 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) || 2130 nla_put_u32(skb, NDTPA_UCAST_PROBES, 2131 NEIGH_VAR(parms, UCAST_PROBES)) || 2132 nla_put_u32(skb, NDTPA_MCAST_PROBES, 2133 NEIGH_VAR(parms, MCAST_PROBES)) || 2134 nla_put_u32(skb, NDTPA_MCAST_REPROBES, 2135 NEIGH_VAR(parms, MCAST_REPROBES)) || 2136 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time, 2137 NDTPA_PAD) || 2138 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, 2139 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) || 2140 nla_put_msecs(skb, NDTPA_GC_STALETIME, 2141 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) || 2142 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME, 2143 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) || 2144 nla_put_msecs(skb, NDTPA_RETRANS_TIME, 2145 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) || 2146 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, 2147 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) || 2148 nla_put_msecs(skb, NDTPA_PROXY_DELAY, 2149 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) || 2150 nla_put_msecs(skb, NDTPA_LOCKTIME, 2151 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) || 2152 nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS, 2153 NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD)) 2154 goto nla_put_failure; 2155 return nla_nest_end(skb, nest); 2156 2157 nla_put_failure: 2158 nla_nest_cancel(skb, nest); 2159 return -EMSGSIZE; 2160 } 2161 2162 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, 2163 u32 pid, u32 seq, int type, int flags) 2164 { 2165 struct nlmsghdr *nlh; 2166 struct ndtmsg *ndtmsg; 2167 2168 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2169 if (nlh == NULL) 2170 return -EMSGSIZE; 2171 2172 ndtmsg = nlmsg_data(nlh); 2173 2174 read_lock_bh(&tbl->lock); 2175 ndtmsg->ndtm_family = tbl->family; 2176 ndtmsg->ndtm_pad1 = 0; 2177 ndtmsg->ndtm_pad2 = 0; 2178 2179 if (nla_put_string(skb, NDTA_NAME, tbl->id) || 2180 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) || 2181 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) || 2182 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) || 2183 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3)) 2184 goto nla_put_failure; 2185 { 2186 unsigned long now = jiffies; 2187 long flush_delta = now - tbl->last_flush; 2188 long rand_delta = now - tbl->last_rand; 2189 struct neigh_hash_table *nht; 2190 struct ndt_config ndc = { 2191 .ndtc_key_len = tbl->key_len, 2192 .ndtc_entry_size = tbl->entry_size, 
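/* the entry count below is a point-in-time snapshot of the atomic counter */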
2193 .ndtc_entries = atomic_read(&tbl->entries), 2194 .ndtc_last_flush = jiffies_to_msecs(flush_delta), 2195 .ndtc_last_rand = jiffies_to_msecs(rand_delta), 2196 .ndtc_proxy_qlen = tbl->proxy_queue.qlen, 2197 }; 2198 2199 rcu_read_lock_bh(); 2200 nht = rcu_dereference_bh(tbl->nht); 2201 ndc.ndtc_hash_rnd = nht->hash_rnd[0]; 2202 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); 2203 rcu_read_unlock_bh(); 2204 2205 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc)) 2206 goto nla_put_failure; 2207 } 2208 2209 { 2210 int cpu; 2211 struct ndt_stats ndst; 2212 2213 memset(&ndst, 0, sizeof(ndst)); 2214 2215 for_each_possible_cpu(cpu) { 2216 struct neigh_statistics *st; 2217 2218 st = per_cpu_ptr(tbl->stats, cpu); 2219 ndst.ndts_allocs += st->allocs; 2220 ndst.ndts_destroys += st->destroys; 2221 ndst.ndts_hash_grows += st->hash_grows; 2222 ndst.ndts_res_failed += st->res_failed; 2223 ndst.ndts_lookups += st->lookups; 2224 ndst.ndts_hits += st->hits; 2225 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast; 2226 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast; 2227 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs; 2228 ndst.ndts_forced_gc_runs += st->forced_gc_runs; 2229 ndst.ndts_table_fulls += st->table_fulls; 2230 } 2231 2232 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst, 2233 NDTA_PAD)) 2234 goto nla_put_failure; 2235 } 2236 2237 BUG_ON(tbl->parms.dev); 2238 if (neightbl_fill_parms(skb, &tbl->parms) < 0) 2239 goto nla_put_failure; 2240 2241 read_unlock_bh(&tbl->lock); 2242 nlmsg_end(skb, nlh); 2243 return 0; 2244 2245 nla_put_failure: 2246 read_unlock_bh(&tbl->lock); 2247 nlmsg_cancel(skb, nlh); 2248 return -EMSGSIZE; 2249 } 2250 2251 static int neightbl_fill_param_info(struct sk_buff *skb, 2252 struct neigh_table *tbl, 2253 struct neigh_parms *parms, 2254 u32 pid, u32 seq, int type, 2255 unsigned int flags) 2256 { 2257 struct ndtmsg *ndtmsg; 2258 struct nlmsghdr *nlh; 2259 2260 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2261 if (nlh == NULL) 2262 return -EMSGSIZE; 2263 2264 ndtmsg = nlmsg_data(nlh); 2265 2266 read_lock_bh(&tbl->lock); 2267 ndtmsg->ndtm_family = tbl->family; 2268 ndtmsg->ndtm_pad1 = 0; 2269 ndtmsg->ndtm_pad2 = 0; 2270 2271 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 || 2272 neightbl_fill_parms(skb, parms) < 0) 2273 goto errout; 2274 2275 read_unlock_bh(&tbl->lock); 2276 nlmsg_end(skb, nlh); 2277 return 0; 2278 errout: 2279 read_unlock_bh(&tbl->lock); 2280 nlmsg_cancel(skb, nlh); 2281 return -EMSGSIZE; 2282 } 2283 2284 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { 2285 [NDTA_NAME] = { .type = NLA_STRING }, 2286 [NDTA_THRESH1] = { .type = NLA_U32 }, 2287 [NDTA_THRESH2] = { .type = NLA_U32 }, 2288 [NDTA_THRESH3] = { .type = NLA_U32 }, 2289 [NDTA_GC_INTERVAL] = { .type = NLA_U64 }, 2290 [NDTA_PARMS] = { .type = NLA_NESTED }, 2291 }; 2292 2293 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { 2294 [NDTPA_IFINDEX] = { .type = NLA_U32 }, 2295 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, 2296 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, 2297 [NDTPA_APP_PROBES] = { .type = NLA_U32 }, 2298 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, 2299 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 }, 2300 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 }, 2301 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 }, 2302 [NDTPA_GC_STALETIME] = { .type = NLA_U64 }, 2303 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 }, 2304 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 }, 2305 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 }, 2306 [NDTPA_PROXY_DELAY] = { .type = 
NLA_U64 }, 2307 [NDTPA_LOCKTIME] = { .type = NLA_U64 }, 2308 [NDTPA_INTERVAL_PROBE_TIME_MS] = { .type = NLA_U64, .min = 1 }, 2309 }; 2310 2311 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, 2312 struct netlink_ext_ack *extack) 2313 { 2314 struct net *net = sock_net(skb->sk); 2315 struct neigh_table *tbl; 2316 struct ndtmsg *ndtmsg; 2317 struct nlattr *tb[NDTA_MAX+1]; 2318 bool found = false; 2319 int err, tidx; 2320 2321 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, 2322 nl_neightbl_policy, extack); 2323 if (err < 0) 2324 goto errout; 2325 2326 if (tb[NDTA_NAME] == NULL) { 2327 err = -EINVAL; 2328 goto errout; 2329 } 2330 2331 ndtmsg = nlmsg_data(nlh); 2332 2333 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2334 tbl = neigh_tables[tidx]; 2335 if (!tbl) 2336 continue; 2337 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) 2338 continue; 2339 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) { 2340 found = true; 2341 break; 2342 } 2343 } 2344 2345 if (!found) 2346 return -ENOENT; 2347 2348 /* 2349 * We acquire tbl->lock to be nice to the periodic timers and 2350 * make sure they always see a consistent set of values. 2351 */ 2352 write_lock_bh(&tbl->lock); 2353 2354 if (tb[NDTA_PARMS]) { 2355 struct nlattr *tbp[NDTPA_MAX+1]; 2356 struct neigh_parms *p; 2357 int i, ifindex = 0; 2358 2359 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX, 2360 tb[NDTA_PARMS], 2361 nl_ntbl_parm_policy, extack); 2362 if (err < 0) 2363 goto errout_tbl_lock; 2364 2365 if (tbp[NDTPA_IFINDEX]) 2366 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); 2367 2368 p = lookup_neigh_parms(tbl, net, ifindex); 2369 if (p == NULL) { 2370 err = -ENOENT; 2371 goto errout_tbl_lock; 2372 } 2373 2374 for (i = 1; i <= NDTPA_MAX; i++) { 2375 if (tbp[i] == NULL) 2376 continue; 2377 2378 switch (i) { 2379 case NDTPA_QUEUE_LEN: 2380 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2381 nla_get_u32(tbp[i]) * 2382 SKB_TRUESIZE(ETH_FRAME_LEN)); 2383 break; 2384 case NDTPA_QUEUE_LENBYTES: 2385 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2386 nla_get_u32(tbp[i])); 2387 break; 2388 case NDTPA_PROXY_QLEN: 2389 NEIGH_VAR_SET(p, PROXY_QLEN, 2390 nla_get_u32(tbp[i])); 2391 break; 2392 case NDTPA_APP_PROBES: 2393 NEIGH_VAR_SET(p, APP_PROBES, 2394 nla_get_u32(tbp[i])); 2395 break; 2396 case NDTPA_UCAST_PROBES: 2397 NEIGH_VAR_SET(p, UCAST_PROBES, 2398 nla_get_u32(tbp[i])); 2399 break; 2400 case NDTPA_MCAST_PROBES: 2401 NEIGH_VAR_SET(p, MCAST_PROBES, 2402 nla_get_u32(tbp[i])); 2403 break; 2404 case NDTPA_MCAST_REPROBES: 2405 NEIGH_VAR_SET(p, MCAST_REPROBES, 2406 nla_get_u32(tbp[i])); 2407 break; 2408 case NDTPA_BASE_REACHABLE_TIME: 2409 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2410 nla_get_msecs(tbp[i])); 2411 /* update reachable_time as well, otherwise, the change will 2412 * only be effective after the next time neigh_periodic_work 2413 * decides to recompute it (can be multiple minutes) 2414 */ 2415 p->reachable_time = 2416 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2417 break; 2418 case NDTPA_GC_STALETIME: 2419 NEIGH_VAR_SET(p, GC_STALETIME, 2420 nla_get_msecs(tbp[i])); 2421 break; 2422 case NDTPA_DELAY_PROBE_TIME: 2423 NEIGH_VAR_SET(p, DELAY_PROBE_TIME, 2424 nla_get_msecs(tbp[i])); 2425 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 2426 break; 2427 case NDTPA_INTERVAL_PROBE_TIME_MS: 2428 NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS, 2429 nla_get_msecs(tbp[i])); 2430 break; 2431 case NDTPA_RETRANS_TIME: 2432 NEIGH_VAR_SET(p, RETRANS_TIME, 2433 nla_get_msecs(tbp[i])); 2434 break; 2435 case 
NDTPA_ANYCAST_DELAY: 2436 NEIGH_VAR_SET(p, ANYCAST_DELAY, 2437 nla_get_msecs(tbp[i])); 2438 break; 2439 case NDTPA_PROXY_DELAY: 2440 NEIGH_VAR_SET(p, PROXY_DELAY, 2441 nla_get_msecs(tbp[i])); 2442 break; 2443 case NDTPA_LOCKTIME: 2444 NEIGH_VAR_SET(p, LOCKTIME, 2445 nla_get_msecs(tbp[i])); 2446 break; 2447 } 2448 } 2449 } 2450 2451 err = -ENOENT; 2452 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] || 2453 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) && 2454 !net_eq(net, &init_net)) 2455 goto errout_tbl_lock; 2456 2457 if (tb[NDTA_THRESH1]) 2458 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]); 2459 2460 if (tb[NDTA_THRESH2]) 2461 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]); 2462 2463 if (tb[NDTA_THRESH3]) 2464 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]); 2465 2466 if (tb[NDTA_GC_INTERVAL]) 2467 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]); 2468 2469 err = 0; 2470 2471 errout_tbl_lock: 2472 write_unlock_bh(&tbl->lock); 2473 errout: 2474 return err; 2475 } 2476 2477 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh, 2478 struct netlink_ext_ack *extack) 2479 { 2480 struct ndtmsg *ndtm; 2481 2482 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) { 2483 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request"); 2484 return -EINVAL; 2485 } 2486 2487 ndtm = nlmsg_data(nlh); 2488 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) { 2489 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request"); 2490 return -EINVAL; 2491 } 2492 2493 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) { 2494 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request"); 2495 return -EINVAL; 2496 } 2497 2498 return 0; 2499 } 2500 2501 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2502 { 2503 const struct nlmsghdr *nlh = cb->nlh; 2504 struct net *net = sock_net(skb->sk); 2505 int family, tidx, nidx = 0; 2506 int tbl_skip = cb->args[0]; 2507 int neigh_skip = cb->args[1]; 2508 struct neigh_table *tbl; 2509 2510 if (cb->strict_check) { 2511 int err = neightbl_valid_dump_info(nlh, cb->extack); 2512 2513 if (err < 0) 2514 return err; 2515 } 2516 2517 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2518 2519 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2520 struct neigh_parms *p; 2521 2522 tbl = neigh_tables[tidx]; 2523 if (!tbl) 2524 continue; 2525 2526 if (tidx < tbl_skip || (family && tbl->family != family)) 2527 continue; 2528 2529 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid, 2530 nlh->nlmsg_seq, RTM_NEWNEIGHTBL, 2531 NLM_F_MULTI) < 0) 2532 break; 2533 2534 nidx = 0; 2535 p = list_next_entry(&tbl->parms, list); 2536 list_for_each_entry_from(p, &tbl->parms_list, list) { 2537 if (!net_eq(neigh_parms_net(p), net)) 2538 continue; 2539 2540 if (nidx < neigh_skip) 2541 goto next; 2542 2543 if (neightbl_fill_param_info(skb, tbl, p, 2544 NETLINK_CB(cb->skb).portid, 2545 nlh->nlmsg_seq, 2546 RTM_NEWNEIGHTBL, 2547 NLM_F_MULTI) < 0) 2548 goto out; 2549 next: 2550 nidx++; 2551 } 2552 2553 neigh_skip = 0; 2554 } 2555 out: 2556 cb->args[0] = tidx; 2557 cb->args[1] = nidx; 2558 2559 return skb->len; 2560 } 2561 2562 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, 2563 u32 pid, u32 seq, int type, unsigned int flags) 2564 { 2565 u32 neigh_flags, neigh_flags_ext; 2566 unsigned long now = jiffies; 2567 struct nda_cacheinfo ci; 2568 struct nlmsghdr *nlh; 2569 struct ndmsg *ndm; 2570 2571 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2572 if (nlh == NULL) 2573 return -EMSGSIZE; 
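/* The body below follows the usual rtnetlink fill pattern: reserve the
 * header with nlmsg_put(), append attributes with nla_put*(), then either
 * seal the message with nlmsg_end() or unwind it whole with nlmsg_cancel()
 * when an attribute does not fit.  A minimal sketch of that pattern, with
 * a hypothetical "probes" value, for illustration only:
 *
 *	nlh = nlmsg_put(skb, pid, seq, RTM_NEWNEIGH, sizeof(*ndm), flags);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	if (nla_put_u32(skb, NDA_PROBES, probes))
 *		goto nla_put_failure;
 *	nlmsg_end(skb, nlh);
 *	return 0;
 * nla_put_failure:
 *	nlmsg_cancel(skb, nlh);
 *	return -EMSGSIZE;
 */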
2574 2575 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT; 2576 neigh_flags = neigh->flags & NTF_OLD_MASK; 2577 2578 ndm = nlmsg_data(nlh); 2579 ndm->ndm_family = neigh->ops->family; 2580 ndm->ndm_pad1 = 0; 2581 ndm->ndm_pad2 = 0; 2582 ndm->ndm_flags = neigh_flags; 2583 ndm->ndm_type = neigh->type; 2584 ndm->ndm_ifindex = neigh->dev->ifindex; 2585 2586 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key)) 2587 goto nla_put_failure; 2588 2589 read_lock_bh(&neigh->lock); 2590 ndm->ndm_state = neigh->nud_state; 2591 if (neigh->nud_state & NUD_VALID) { 2592 char haddr[MAX_ADDR_LEN]; 2593 2594 neigh_ha_snapshot(haddr, neigh, neigh->dev); 2595 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) { 2596 read_unlock_bh(&neigh->lock); 2597 goto nla_put_failure; 2598 } 2599 } 2600 2601 ci.ndm_used = jiffies_to_clock_t(now - neigh->used); 2602 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed); 2603 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated); 2604 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1; 2605 read_unlock_bh(&neigh->lock); 2606 2607 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) || 2608 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 2609 goto nla_put_failure; 2610 2611 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol)) 2612 goto nla_put_failure; 2613 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) 2614 goto nla_put_failure; 2615 2616 nlmsg_end(skb, nlh); 2617 return 0; 2618 2619 nla_put_failure: 2620 nlmsg_cancel(skb, nlh); 2621 return -EMSGSIZE; 2622 } 2623 2624 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, 2625 u32 pid, u32 seq, int type, unsigned int flags, 2626 struct neigh_table *tbl) 2627 { 2628 u32 neigh_flags, neigh_flags_ext; 2629 struct nlmsghdr *nlh; 2630 struct ndmsg *ndm; 2631 2632 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2633 if (nlh == NULL) 2634 return -EMSGSIZE; 2635 2636 neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT; 2637 neigh_flags = pn->flags & NTF_OLD_MASK; 2638 2639 ndm = nlmsg_data(nlh); 2640 ndm->ndm_family = tbl->family; 2641 ndm->ndm_pad1 = 0; 2642 ndm->ndm_pad2 = 0; 2643 ndm->ndm_flags = neigh_flags | NTF_PROXY; 2644 ndm->ndm_type = RTN_UNICAST; 2645 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; 2646 ndm->ndm_state = NUD_NONE; 2647 2648 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) 2649 goto nla_put_failure; 2650 2651 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol)) 2652 goto nla_put_failure; 2653 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) 2654 goto nla_put_failure; 2655 2656 nlmsg_end(skb, nlh); 2657 return 0; 2658 2659 nla_put_failure: 2660 nlmsg_cancel(skb, nlh); 2661 return -EMSGSIZE; 2662 } 2663 2664 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid) 2665 { 2666 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 2667 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid); 2668 } 2669 2670 static bool neigh_master_filtered(struct net_device *dev, int master_idx) 2671 { 2672 struct net_device *master; 2673 2674 if (!master_idx) 2675 return false; 2676 2677 master = dev ? netdev_master_upper_dev_get(dev) : NULL; 2678 2679 /* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another 2680 * invalid value for ifindex to denote "no master". 
2681 */ 2682 if (master_idx == -1) 2683 return !!master; 2684 2685 if (!master || master->ifindex != master_idx) 2686 return true; 2687 2688 return false; 2689 } 2690 2691 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx) 2692 { 2693 if (filter_idx && (!dev || dev->ifindex != filter_idx)) 2694 return true; 2695 2696 return false; 2697 } 2698 2699 struct neigh_dump_filter { 2700 int master_idx; 2701 int dev_idx; 2702 }; 2703 2704 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2705 struct netlink_callback *cb, 2706 struct neigh_dump_filter *filter) 2707 { 2708 struct net *net = sock_net(skb->sk); 2709 struct neighbour *n; 2710 int rc, h, s_h = cb->args[1]; 2711 int idx, s_idx = idx = cb->args[2]; 2712 struct neigh_hash_table *nht; 2713 unsigned int flags = NLM_F_MULTI; 2714 2715 if (filter->dev_idx || filter->master_idx) 2716 flags |= NLM_F_DUMP_FILTERED; 2717 2718 rcu_read_lock_bh(); 2719 nht = rcu_dereference_bh(tbl->nht); 2720 2721 for (h = s_h; h < (1 << nht->hash_shift); h++) { 2722 if (h > s_h) 2723 s_idx = 0; 2724 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; 2725 n != NULL; 2726 n = rcu_dereference_bh(n->next)) { 2727 if (idx < s_idx || !net_eq(dev_net(n->dev), net)) 2728 goto next; 2729 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2730 neigh_master_filtered(n->dev, filter->master_idx)) 2731 goto next; 2732 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2733 cb->nlh->nlmsg_seq, 2734 RTM_NEWNEIGH, 2735 flags) < 0) { 2736 rc = -1; 2737 goto out; 2738 } 2739 next: 2740 idx++; 2741 } 2742 } 2743 rc = skb->len; 2744 out: 2745 rcu_read_unlock_bh(); 2746 cb->args[1] = h; 2747 cb->args[2] = idx; 2748 return rc; 2749 } 2750 2751 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2752 struct netlink_callback *cb, 2753 struct neigh_dump_filter *filter) 2754 { 2755 struct pneigh_entry *n; 2756 struct net *net = sock_net(skb->sk); 2757 int rc, h, s_h = cb->args[3]; 2758 int idx, s_idx = idx = cb->args[4]; 2759 unsigned int flags = NLM_F_MULTI; 2760 2761 if (filter->dev_idx || filter->master_idx) 2762 flags |= NLM_F_DUMP_FILTERED; 2763 2764 read_lock_bh(&tbl->lock); 2765 2766 for (h = s_h; h <= PNEIGH_HASHMASK; h++) { 2767 if (h > s_h) 2768 s_idx = 0; 2769 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2770 if (idx < s_idx || pneigh_net(n) != net) 2771 goto next; 2772 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2773 neigh_master_filtered(n->dev, filter->master_idx)) 2774 goto next; 2775 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2776 cb->nlh->nlmsg_seq, 2777 RTM_NEWNEIGH, flags, tbl) < 0) { 2778 read_unlock_bh(&tbl->lock); 2779 rc = -1; 2780 goto out; 2781 } 2782 next: 2783 idx++; 2784 } 2785 } 2786 2787 read_unlock_bh(&tbl->lock); 2788 rc = skb->len; 2789 out: 2790 cb->args[3] = h; 2791 cb->args[4] = idx; 2792 return rc; 2793 2794 } 2795 2796 static int neigh_valid_dump_req(const struct nlmsghdr *nlh, 2797 bool strict_check, 2798 struct neigh_dump_filter *filter, 2799 struct netlink_ext_ack *extack) 2800 { 2801 struct nlattr *tb[NDA_MAX + 1]; 2802 int err, i; 2803 2804 if (strict_check) { 2805 struct ndmsg *ndm; 2806 2807 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2808 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request"); 2809 return -EINVAL; 2810 } 2811 2812 ndm = nlmsg_data(nlh); 2813 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex || 2814 ndm->ndm_state || ndm->ndm_type) { 2815 NL_SET_ERR_MSG(extack, "Invalid values in 
header for neighbor dump request"); 2816 return -EINVAL; 2817 } 2818 2819 if (ndm->ndm_flags & ~NTF_PROXY) { 2820 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request"); 2821 return -EINVAL; 2822 } 2823 2824 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), 2825 tb, NDA_MAX, nda_policy, 2826 extack); 2827 } else { 2828 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb, 2829 NDA_MAX, nda_policy, extack); 2830 } 2831 if (err < 0) 2832 return err; 2833 2834 for (i = 0; i <= NDA_MAX; ++i) { 2835 if (!tb[i]) 2836 continue; 2837 2838 /* all new attributes should require strict_check */ 2839 switch (i) { 2840 case NDA_IFINDEX: 2841 filter->dev_idx = nla_get_u32(tb[i]); 2842 break; 2843 case NDA_MASTER: 2844 filter->master_idx = nla_get_u32(tb[i]); 2845 break; 2846 default: 2847 if (strict_check) { 2848 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request"); 2849 return -EINVAL; 2850 } 2851 } 2852 } 2853 2854 return 0; 2855 } 2856 2857 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2858 { 2859 const struct nlmsghdr *nlh = cb->nlh; 2860 struct neigh_dump_filter filter = {}; 2861 struct neigh_table *tbl; 2862 int t, family, s_t; 2863 int proxy = 0; 2864 int err; 2865 2866 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2867 2868 /* check for full ndmsg structure presence, family member is 2869 * the same for both structures 2870 */ 2871 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) && 2872 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY) 2873 proxy = 1; 2874 2875 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack); 2876 if (err < 0 && cb->strict_check) 2877 return err; 2878 2879 s_t = cb->args[0]; 2880 2881 for (t = 0; t < NEIGH_NR_TABLES; t++) { 2882 tbl = neigh_tables[t]; 2883 2884 if (!tbl) 2885 continue; 2886 if (t < s_t || (family && tbl->family != family)) 2887 continue; 2888 if (t > s_t) 2889 memset(&cb->args[1], 0, sizeof(cb->args) - 2890 sizeof(cb->args[0])); 2891 if (proxy) 2892 err = pneigh_dump_table(tbl, skb, cb, &filter); 2893 else 2894 err = neigh_dump_table(tbl, skb, cb, &filter); 2895 if (err < 0) 2896 break; 2897 } 2898 2899 cb->args[0] = t; 2900 return skb->len; 2901 } 2902 2903 static int neigh_valid_get_req(const struct nlmsghdr *nlh, 2904 struct neigh_table **tbl, 2905 void **dst, int *dev_idx, u8 *ndm_flags, 2906 struct netlink_ext_ack *extack) 2907 { 2908 struct nlattr *tb[NDA_MAX + 1]; 2909 struct ndmsg *ndm; 2910 int err, i; 2911 2912 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2913 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request"); 2914 return -EINVAL; 2915 } 2916 2917 ndm = nlmsg_data(nlh); 2918 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 2919 ndm->ndm_type) { 2920 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request"); 2921 return -EINVAL; 2922 } 2923 2924 if (ndm->ndm_flags & ~NTF_PROXY) { 2925 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request"); 2926 return -EINVAL; 2927 } 2928 2929 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 2930 NDA_MAX, nda_policy, extack); 2931 if (err < 0) 2932 return err; 2933 2934 *ndm_flags = ndm->ndm_flags; 2935 *dev_idx = ndm->ndm_ifindex; 2936 *tbl = neigh_find_table(ndm->ndm_family); 2937 if (*tbl == NULL) { 2938 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request"); 2939 return -EAFNOSUPPORT; 2940 } 2941 2942 for (i = 0; i <= NDA_MAX; ++i) { 2943 if (!tb[i]) 2944 continue; 2945 2946 
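/* Only NDA_IFINDEX and NDA_MASTER are meaningful in a dump request; they
 * feed the kernel-side filter, so a request carrying NDA_IFINDEX (as an
 * "ip neigh show dev ..." style dump is assumed to do on strictly-checking
 * kernels) only returns entries for that device.  Any other attribute is
 * rejected when strict checking is enabled.
 */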
switch (i) { 2947 case NDA_DST: 2948 if (nla_len(tb[i]) != (int)(*tbl)->key_len) { 2949 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request"); 2950 return -EINVAL; 2951 } 2952 *dst = nla_data(tb[i]); 2953 break; 2954 default: 2955 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request"); 2956 return -EINVAL; 2957 } 2958 } 2959 2960 return 0; 2961 } 2962 2963 static inline size_t neigh_nlmsg_size(void) 2964 { 2965 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2966 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2967 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */ 2968 + nla_total_size(sizeof(struct nda_cacheinfo)) 2969 + nla_total_size(4) /* NDA_PROBES */ 2970 + nla_total_size(4) /* NDA_FLAGS_EXT */ 2971 + nla_total_size(1); /* NDA_PROTOCOL */ 2972 } 2973 2974 static int neigh_get_reply(struct net *net, struct neighbour *neigh, 2975 u32 pid, u32 seq) 2976 { 2977 struct sk_buff *skb; 2978 int err = 0; 2979 2980 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL); 2981 if (!skb) 2982 return -ENOBUFS; 2983 2984 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0); 2985 if (err) { 2986 kfree_skb(skb); 2987 goto errout; 2988 } 2989 2990 err = rtnl_unicast(skb, net, pid); 2991 errout: 2992 return err; 2993 } 2994 2995 static inline size_t pneigh_nlmsg_size(void) 2996 { 2997 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2998 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2999 + nla_total_size(4) /* NDA_FLAGS_EXT */ 3000 + nla_total_size(1); /* NDA_PROTOCOL */ 3001 } 3002 3003 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh, 3004 u32 pid, u32 seq, struct neigh_table *tbl) 3005 { 3006 struct sk_buff *skb; 3007 int err = 0; 3008 3009 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL); 3010 if (!skb) 3011 return -ENOBUFS; 3012 3013 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl); 3014 if (err) { 3015 kfree_skb(skb); 3016 goto errout; 3017 } 3018 3019 err = rtnl_unicast(skb, net, pid); 3020 errout: 3021 return err; 3022 } 3023 3024 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3025 struct netlink_ext_ack *extack) 3026 { 3027 struct net *net = sock_net(in_skb->sk); 3028 struct net_device *dev = NULL; 3029 struct neigh_table *tbl = NULL; 3030 struct neighbour *neigh; 3031 void *dst = NULL; 3032 u8 ndm_flags = 0; 3033 int dev_idx = 0; 3034 int err; 3035 3036 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags, 3037 extack); 3038 if (err < 0) 3039 return err; 3040 3041 if (dev_idx) { 3042 dev = __dev_get_by_index(net, dev_idx); 3043 if (!dev) { 3044 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 3045 return -ENODEV; 3046 } 3047 } 3048 3049 if (!dst) { 3050 NL_SET_ERR_MSG(extack, "Network address not specified"); 3051 return -EINVAL; 3052 } 3053 3054 if (ndm_flags & NTF_PROXY) { 3055 struct pneigh_entry *pn; 3056 3057 pn = pneigh_lookup(tbl, net, dst, dev, 0); 3058 if (!pn) { 3059 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found"); 3060 return -ENOENT; 3061 } 3062 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid, 3063 nlh->nlmsg_seq, tbl); 3064 } 3065 3066 if (!dev) { 3067 NL_SET_ERR_MSG(extack, "No device specified"); 3068 return -EINVAL; 3069 } 3070 3071 neigh = neigh_lookup(tbl, dst, dev); 3072 if (!neigh) { 3073 NL_SET_ERR_MSG(extack, "Neighbour entry not found"); 3074 return -ENOENT; 3075 } 3076 3077 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid, 3078 nlh->nlmsg_seq); 3079 3080 neigh_release(neigh); 3081 3082 return err; 3083 } 3084 3085 void neigh_for_each(struct 
neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) 3086 { 3087 int chain; 3088 struct neigh_hash_table *nht; 3089 3090 rcu_read_lock_bh(); 3091 nht = rcu_dereference_bh(tbl->nht); 3092 3093 read_lock(&tbl->lock); /* avoid resizes */ 3094 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 3095 struct neighbour *n; 3096 3097 for (n = rcu_dereference_bh(nht->hash_buckets[chain]); 3098 n != NULL; 3099 n = rcu_dereference_bh(n->next)) 3100 cb(n, cookie); 3101 } 3102 read_unlock(&tbl->lock); 3103 rcu_read_unlock_bh(); 3104 } 3105 EXPORT_SYMBOL(neigh_for_each); 3106 3107 /* The tbl->lock must be held as a writer and BH disabled. */ 3108 void __neigh_for_each_release(struct neigh_table *tbl, 3109 int (*cb)(struct neighbour *)) 3110 { 3111 int chain; 3112 struct neigh_hash_table *nht; 3113 3114 nht = rcu_dereference_protected(tbl->nht, 3115 lockdep_is_held(&tbl->lock)); 3116 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 3117 struct neighbour *n; 3118 struct neighbour __rcu **np; 3119 3120 np = &nht->hash_buckets[chain]; 3121 while ((n = rcu_dereference_protected(*np, 3122 lockdep_is_held(&tbl->lock))) != NULL) { 3123 int release; 3124 3125 write_lock(&n->lock); 3126 release = cb(n); 3127 if (release) { 3128 rcu_assign_pointer(*np, 3129 rcu_dereference_protected(n->next, 3130 lockdep_is_held(&tbl->lock))); 3131 neigh_mark_dead(n); 3132 } else 3133 np = &n->next; 3134 write_unlock(&n->lock); 3135 if (release) 3136 neigh_cleanup_and_release(n); 3137 } 3138 } 3139 } 3140 EXPORT_SYMBOL(__neigh_for_each_release); 3141 3142 int neigh_xmit(int index, struct net_device *dev, 3143 const void *addr, struct sk_buff *skb) 3144 { 3145 int err = -EAFNOSUPPORT; 3146 if (likely(index < NEIGH_NR_TABLES)) { 3147 struct neigh_table *tbl; 3148 struct neighbour *neigh; 3149 3150 tbl = neigh_tables[index]; 3151 if (!tbl) 3152 goto out; 3153 rcu_read_lock_bh(); 3154 if (index == NEIGH_ARP_TABLE) { 3155 u32 key = *((u32 *)addr); 3156 3157 neigh = __ipv4_neigh_lookup_noref(dev, key); 3158 } else { 3159 neigh = __neigh_lookup_noref(tbl, addr, dev); 3160 } 3161 if (!neigh) 3162 neigh = __neigh_create(tbl, addr, dev, false); 3163 err = PTR_ERR(neigh); 3164 if (IS_ERR(neigh)) { 3165 rcu_read_unlock_bh(); 3166 goto out_kfree_skb; 3167 } 3168 err = neigh->output(neigh, skb); 3169 rcu_read_unlock_bh(); 3170 } 3171 else if (index == NEIGH_LINK_TABLE) { 3172 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 3173 addr, NULL, skb->len); 3174 if (err < 0) 3175 goto out_kfree_skb; 3176 err = dev_queue_xmit(skb); 3177 } 3178 out: 3179 return err; 3180 out_kfree_skb: 3181 kfree_skb(skb); 3182 goto out; 3183 } 3184 EXPORT_SYMBOL(neigh_xmit); 3185 3186 #ifdef CONFIG_PROC_FS 3187 3188 static struct neighbour *neigh_get_first(struct seq_file *seq) 3189 { 3190 struct neigh_seq_state *state = seq->private; 3191 struct net *net = seq_file_net(seq); 3192 struct neigh_hash_table *nht = state->nht; 3193 struct neighbour *n = NULL; 3194 int bucket; 3195 3196 state->flags &= ~NEIGH_SEQ_IS_PNEIGH; 3197 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { 3198 n = rcu_dereference_bh(nht->hash_buckets[bucket]); 3199 3200 while (n) { 3201 if (!net_eq(dev_net(n->dev), net)) 3202 goto next; 3203 if (state->neigh_sub_iter) { 3204 loff_t fakep = 0; 3205 void *v; 3206 3207 v = state->neigh_sub_iter(state, n, &fakep); 3208 if (!v) 3209 goto next; 3210 } 3211 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3212 break; 3213 if (n->nud_state & ~NUD_NOARP) 3214 break; 3215 next: 3216 n = 
rcu_dereference_bh(n->next); 3217 } 3218 3219 if (n) 3220 break; 3221 } 3222 state->bucket = bucket; 3223 3224 return n; 3225 } 3226 3227 static struct neighbour *neigh_get_next(struct seq_file *seq, 3228 struct neighbour *n, 3229 loff_t *pos) 3230 { 3231 struct neigh_seq_state *state = seq->private; 3232 struct net *net = seq_file_net(seq); 3233 struct neigh_hash_table *nht = state->nht; 3234 3235 if (state->neigh_sub_iter) { 3236 void *v = state->neigh_sub_iter(state, n, pos); 3237 if (v) 3238 return n; 3239 } 3240 n = rcu_dereference_bh(n->next); 3241 3242 while (1) { 3243 while (n) { 3244 if (!net_eq(dev_net(n->dev), net)) 3245 goto next; 3246 if (state->neigh_sub_iter) { 3247 void *v = state->neigh_sub_iter(state, n, pos); 3248 if (v) 3249 return n; 3250 goto next; 3251 } 3252 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3253 break; 3254 3255 if (n->nud_state & ~NUD_NOARP) 3256 break; 3257 next: 3258 n = rcu_dereference_bh(n->next); 3259 } 3260 3261 if (n) 3262 break; 3263 3264 if (++state->bucket >= (1 << nht->hash_shift)) 3265 break; 3266 3267 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); 3268 } 3269 3270 if (n && pos) 3271 --(*pos); 3272 return n; 3273 } 3274 3275 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) 3276 { 3277 struct neighbour *n = neigh_get_first(seq); 3278 3279 if (n) { 3280 --(*pos); 3281 while (*pos) { 3282 n = neigh_get_next(seq, n, pos); 3283 if (!n) 3284 break; 3285 } 3286 } 3287 return *pos ? NULL : n; 3288 } 3289 3290 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq) 3291 { 3292 struct neigh_seq_state *state = seq->private; 3293 struct net *net = seq_file_net(seq); 3294 struct neigh_table *tbl = state->tbl; 3295 struct pneigh_entry *pn = NULL; 3296 int bucket; 3297 3298 state->flags |= NEIGH_SEQ_IS_PNEIGH; 3299 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { 3300 pn = tbl->phash_buckets[bucket]; 3301 while (pn && !net_eq(pneigh_net(pn), net)) 3302 pn = pn->next; 3303 if (pn) 3304 break; 3305 } 3306 state->bucket = bucket; 3307 3308 return pn; 3309 } 3310 3311 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq, 3312 struct pneigh_entry *pn, 3313 loff_t *pos) 3314 { 3315 struct neigh_seq_state *state = seq->private; 3316 struct net *net = seq_file_net(seq); 3317 struct neigh_table *tbl = state->tbl; 3318 3319 do { 3320 pn = pn->next; 3321 } while (pn && !net_eq(pneigh_net(pn), net)); 3322 3323 while (!pn) { 3324 if (++state->bucket > PNEIGH_HASHMASK) 3325 break; 3326 pn = tbl->phash_buckets[state->bucket]; 3327 while (pn && !net_eq(pneigh_net(pn), net)) 3328 pn = pn->next; 3329 if (pn) 3330 break; 3331 } 3332 3333 if (pn && pos) 3334 --(*pos); 3335 3336 return pn; 3337 } 3338 3339 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos) 3340 { 3341 struct pneigh_entry *pn = pneigh_get_first(seq); 3342 3343 if (pn) { 3344 --(*pos); 3345 while (*pos) { 3346 pn = pneigh_get_next(seq, pn, pos); 3347 if (!pn) 3348 break; 3349 } 3350 } 3351 return *pos ? 
NULL : pn; 3352 } 3353 3354 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) 3355 { 3356 struct neigh_seq_state *state = seq->private; 3357 void *rc; 3358 loff_t idxpos = *pos; 3359 3360 rc = neigh_get_idx(seq, &idxpos); 3361 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3362 rc = pneigh_get_idx(seq, &idxpos); 3363 3364 return rc; 3365 } 3366 3367 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) 3368 __acquires(tbl->lock) 3369 __acquires(rcu_bh) 3370 { 3371 struct neigh_seq_state *state = seq->private; 3372 3373 state->tbl = tbl; 3374 state->bucket = 0; 3375 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); 3376 3377 rcu_read_lock_bh(); 3378 state->nht = rcu_dereference_bh(tbl->nht); 3379 read_lock(&tbl->lock); 3380 3381 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; 3382 } 3383 EXPORT_SYMBOL(neigh_seq_start); 3384 3385 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3386 { 3387 struct neigh_seq_state *state; 3388 void *rc; 3389 3390 if (v == SEQ_START_TOKEN) { 3391 rc = neigh_get_first(seq); 3392 goto out; 3393 } 3394 3395 state = seq->private; 3396 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) { 3397 rc = neigh_get_next(seq, v, NULL); 3398 if (rc) 3399 goto out; 3400 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3401 rc = pneigh_get_first(seq); 3402 } else { 3403 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY); 3404 rc = pneigh_get_next(seq, v, NULL); 3405 } 3406 out: 3407 ++(*pos); 3408 return rc; 3409 } 3410 EXPORT_SYMBOL(neigh_seq_next); 3411 3412 void neigh_seq_stop(struct seq_file *seq, void *v) 3413 __releases(tbl->lock) 3414 __releases(rcu_bh) 3415 { 3416 struct neigh_seq_state *state = seq->private; 3417 struct neigh_table *tbl = state->tbl; 3418 3419 read_unlock(&tbl->lock); 3420 rcu_read_unlock_bh(); 3421 } 3422 EXPORT_SYMBOL(neigh_seq_stop); 3423 3424 /* statistics via seq_file */ 3425 3426 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 3427 { 3428 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3429 int cpu; 3430 3431 if (*pos == 0) 3432 return SEQ_START_TOKEN; 3433 3434 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { 3435 if (!cpu_possible(cpu)) 3436 continue; 3437 *pos = cpu+1; 3438 return per_cpu_ptr(tbl->stats, cpu); 3439 } 3440 return NULL; 3441 } 3442 3443 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3444 { 3445 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3446 int cpu; 3447 3448 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 3449 if (!cpu_possible(cpu)) 3450 continue; 3451 *pos = cpu+1; 3452 return per_cpu_ptr(tbl->stats, cpu); 3453 } 3454 (*pos)++; 3455 return NULL; 3456 } 3457 3458 static void neigh_stat_seq_stop(struct seq_file *seq, void *v) 3459 { 3460 3461 } 3462 3463 static int neigh_stat_seq_show(struct seq_file *seq, void *v) 3464 { 3465 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3466 struct neigh_statistics *st = v; 3467 3468 if (v == SEQ_START_TOKEN) { 3469 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n"); 3470 return 0; 3471 } 3472 3473 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " 3474 "%08lx %08lx %08lx " 3475 "%08lx %08lx %08lx\n", 3476 atomic_read(&tbl->entries), 3477 3478 st->allocs, 3479 st->destroys, 3480 st->hash_grows, 3481 3482 st->lookups, 3483 st->hits, 3484 3485 st->res_failed, 3486 3487 
st->rcv_probes_mcast, 3488 st->rcv_probes_ucast, 3489 3490 st->periodic_gc_runs, 3491 st->forced_gc_runs, 3492 st->unres_discards, 3493 st->table_fulls 3494 ); 3495 3496 return 0; 3497 } 3498 3499 static const struct seq_operations neigh_stat_seq_ops = { 3500 .start = neigh_stat_seq_start, 3501 .next = neigh_stat_seq_next, 3502 .stop = neigh_stat_seq_stop, 3503 .show = neigh_stat_seq_show, 3504 }; 3505 #endif /* CONFIG_PROC_FS */ 3506 3507 static void __neigh_notify(struct neighbour *n, int type, int flags, 3508 u32 pid) 3509 { 3510 struct net *net = dev_net(n->dev); 3511 struct sk_buff *skb; 3512 int err = -ENOBUFS; 3513 3514 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC); 3515 if (skb == NULL) 3516 goto errout; 3517 3518 err = neigh_fill_info(skb, n, pid, 0, type, flags); 3519 if (err < 0) { 3520 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */ 3521 WARN_ON(err == -EMSGSIZE); 3522 kfree_skb(skb); 3523 goto errout; 3524 } 3525 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 3526 return; 3527 errout: 3528 if (err < 0) 3529 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 3530 } 3531 3532 void neigh_app_ns(struct neighbour *n) 3533 { 3534 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0); 3535 } 3536 EXPORT_SYMBOL(neigh_app_ns); 3537 3538 #ifdef CONFIG_SYSCTL 3539 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 3540 3541 static int proc_unres_qlen(struct ctl_table *ctl, int write, 3542 void *buffer, size_t *lenp, loff_t *ppos) 3543 { 3544 int size, ret; 3545 struct ctl_table tmp = *ctl; 3546 3547 tmp.extra1 = SYSCTL_ZERO; 3548 tmp.extra2 = &unres_qlen_max; 3549 tmp.data = &size; 3550 3551 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN); 3552 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3553 3554 if (write && !ret) 3555 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); 3556 return ret; 3557 } 3558 3559 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p, 3560 int index) 3561 { 3562 struct net_device *dev; 3563 int family = neigh_parms_family(p); 3564 3565 rcu_read_lock(); 3566 for_each_netdev_rcu(net, dev) { 3567 struct neigh_parms *dst_p = 3568 neigh_get_dev_parms_rcu(dev, family); 3569 3570 if (dst_p && !test_bit(index, dst_p->data_state)) 3571 dst_p->data[index] = p->data[index]; 3572 } 3573 rcu_read_unlock(); 3574 } 3575 3576 static void neigh_proc_update(struct ctl_table *ctl, int write) 3577 { 3578 struct net_device *dev = ctl->extra1; 3579 struct neigh_parms *p = ctl->extra2; 3580 struct net *net = neigh_parms_net(p); 3581 int index = (int *) ctl->data - p->data; 3582 3583 if (!write) 3584 return; 3585 3586 set_bit(index, p->data_state); 3587 if (index == NEIGH_VAR_DELAY_PROBE_TIME) 3588 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 3589 if (!dev) /* NULL dev means this is default value */ 3590 neigh_copy_dflt_parms(net, p, index); 3591 } 3592 3593 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, 3594 void *buffer, size_t *lenp, 3595 loff_t *ppos) 3596 { 3597 struct ctl_table tmp = *ctl; 3598 int ret; 3599 3600 tmp.extra1 = SYSCTL_ZERO; 3601 tmp.extra2 = SYSCTL_INT_MAX; 3602 3603 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3604 neigh_proc_update(ctl, write); 3605 return ret; 3606 } 3607 3608 static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write, 3609 void *buffer, size_t *lenp, loff_t *ppos) 3610 { 3611 struct ctl_table tmp = *ctl; 3612 int ret; 3613 3614 int min = msecs_to_jiffies(1); 3615 3616 tmp.extra1 = &min; 3617 
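/* one jiffy lower bound, no upper bound: the probe interval must stay strictly positive */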
tmp.extra2 = NULL; 3618 3619 ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos); 3620 neigh_proc_update(ctl, write); 3621 return ret; 3622 } 3623 3624 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer, 3625 size_t *lenp, loff_t *ppos) 3626 { 3627 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 3628 3629 neigh_proc_update(ctl, write); 3630 return ret; 3631 } 3632 EXPORT_SYMBOL(neigh_proc_dointvec); 3633 3634 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer, 3635 size_t *lenp, loff_t *ppos) 3636 { 3637 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3638 3639 neigh_proc_update(ctl, write); 3640 return ret; 3641 } 3642 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies); 3643 3644 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write, 3645 void *buffer, size_t *lenp, 3646 loff_t *ppos) 3647 { 3648 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos); 3649 3650 neigh_proc_update(ctl, write); 3651 return ret; 3652 } 3653 3654 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 3655 void *buffer, size_t *lenp, loff_t *ppos) 3656 { 3657 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3658 3659 neigh_proc_update(ctl, write); 3660 return ret; 3661 } 3662 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies); 3663 3664 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, 3665 void *buffer, size_t *lenp, 3666 loff_t *ppos) 3667 { 3668 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos); 3669 3670 neigh_proc_update(ctl, write); 3671 return ret; 3672 } 3673 3674 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 3675 void *buffer, size_t *lenp, 3676 loff_t *ppos) 3677 { 3678 struct neigh_parms *p = ctl->extra2; 3679 int ret; 3680 3681 if (strcmp(ctl->procname, "base_reachable_time") == 0) 3682 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3683 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) 3684 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3685 else 3686 ret = -1; 3687 3688 if (write && ret == 0) { 3689 /* update reachable_time as well, otherwise, the change will 3690 * only be effective after the next time neigh_periodic_work 3691 * decides to recompute it 3692 */ 3693 p->reachable_time = 3694 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 3695 } 3696 return ret; 3697 } 3698 3699 #define NEIGH_PARMS_DATA_OFFSET(index) \ 3700 (&((struct neigh_parms *) 0)->data[index]) 3701 3702 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \ 3703 [NEIGH_VAR_ ## attr] = { \ 3704 .procname = name, \ 3705 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \ 3706 .maxlen = sizeof(int), \ 3707 .mode = mval, \ 3708 .proc_handler = proc, \ 3709 } 3710 3711 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \ 3712 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax) 3713 3714 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \ 3715 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies) 3716 3717 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \ 3718 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies) 3719 3720 #define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \ 3721 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive) 3722 3723 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \ 3724 
NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies) 3725 3726 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \ 3727 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen) 3728 3729 static struct neigh_sysctl_table { 3730 struct ctl_table_header *sysctl_header; 3731 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; 3732 } neigh_sysctl_template __read_mostly = { 3733 .neigh_vars = { 3734 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"), 3735 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"), 3736 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"), 3737 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"), 3738 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"), 3739 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"), 3740 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"), 3741 NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS, 3742 "interval_probe_time_ms"), 3743 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"), 3744 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"), 3745 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"), 3746 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"), 3747 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"), 3748 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"), 3749 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"), 3750 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"), 3751 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"), 3752 [NEIGH_VAR_GC_INTERVAL] = { 3753 .procname = "gc_interval", 3754 .maxlen = sizeof(int), 3755 .mode = 0644, 3756 .proc_handler = proc_dointvec_jiffies, 3757 }, 3758 [NEIGH_VAR_GC_THRESH1] = { 3759 .procname = "gc_thresh1", 3760 .maxlen = sizeof(int), 3761 .mode = 0644, 3762 .extra1 = SYSCTL_ZERO, 3763 .extra2 = SYSCTL_INT_MAX, 3764 .proc_handler = proc_dointvec_minmax, 3765 }, 3766 [NEIGH_VAR_GC_THRESH2] = { 3767 .procname = "gc_thresh2", 3768 .maxlen = sizeof(int), 3769 .mode = 0644, 3770 .extra1 = SYSCTL_ZERO, 3771 .extra2 = SYSCTL_INT_MAX, 3772 .proc_handler = proc_dointvec_minmax, 3773 }, 3774 [NEIGH_VAR_GC_THRESH3] = { 3775 .procname = "gc_thresh3", 3776 .maxlen = sizeof(int), 3777 .mode = 0644, 3778 .extra1 = SYSCTL_ZERO, 3779 .extra2 = SYSCTL_INT_MAX, 3780 .proc_handler = proc_dointvec_minmax, 3781 }, 3782 {}, 3783 }, 3784 }; 3785 3786 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 3787 proc_handler *handler) 3788 { 3789 int i; 3790 struct neigh_sysctl_table *t; 3791 const char *dev_name_source; 3792 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ]; 3793 char *p_name; 3794 3795 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT); 3796 if (!t) 3797 goto err; 3798 3799 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { 3800 t->neigh_vars[i].data += (long) p; 3801 t->neigh_vars[i].extra1 = dev; 3802 t->neigh_vars[i].extra2 = p; 3803 } 3804 3805 if (dev) { 3806 dev_name_source = dev->name; 3807 /* Terminate the table early */ 3808 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3809 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3810 } else { 3811 struct neigh_table *tbl = p->tbl; 3812 dev_name_source = "default"; 3813 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval; 3814 
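/* for the "default" parms the per-table gc knobs are exposed as well */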
t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1; 3815 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2; 3816 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3; 3817 } 3818 3819 if (handler) { 3820 /* RetransTime */ 3821 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler; 3822 /* ReachableTime */ 3823 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler; 3824 /* RetransTime (in milliseconds)*/ 3825 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3826 /* ReachableTime (in milliseconds) */ 3827 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3828 } else { 3829 /* Those handlers will update p->reachable_time after 3830 * base_reachable_time(_ms) is set to ensure the new timer starts being 3831 * applied after the next neighbour update instead of waiting for 3832 * neigh_periodic_work to update its value (can be multiple minutes) 3833 * So any handler that replaces them should do this as well 3834 */ 3835 /* ReachableTime */ 3836 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = 3837 neigh_proc_base_reachable_time; 3838 /* ReachableTime (in milliseconds) */ 3839 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = 3840 neigh_proc_base_reachable_time; 3841 } 3842 3843 switch (neigh_parms_family(p)) { 3844 case AF_INET: 3845 p_name = "ipv4"; 3846 break; 3847 case AF_INET6: 3848 p_name = "ipv6"; 3849 break; 3850 default: 3851 BUG(); 3852 } 3853 3854 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", 3855 p_name, dev_name_source); 3856 t->sysctl_header = 3857 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars); 3858 if (!t->sysctl_header) 3859 goto free; 3860 3861 p->sysctl_table = t; 3862 return 0; 3863 3864 free: 3865 kfree(t); 3866 err: 3867 return -ENOBUFS; 3868 } 3869 EXPORT_SYMBOL(neigh_sysctl_register); 3870 3871 void neigh_sysctl_unregister(struct neigh_parms *p) 3872 { 3873 if (p->sysctl_table) { 3874 struct neigh_sysctl_table *t = p->sysctl_table; 3875 p->sysctl_table = NULL; 3876 unregister_net_sysctl_table(t->sysctl_header); 3877 kfree(t); 3878 } 3879 } 3880 EXPORT_SYMBOL(neigh_sysctl_unregister); 3881 3882 #endif /* CONFIG_SYSCTL */ 3883 3884 static int __init neigh_init(void) 3885 { 3886 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0); 3887 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0); 3888 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0); 3889 3890 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, 3891 0); 3892 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0); 3893 3894 return 0; 3895 } 3896 3897 subsys_initcall(neigh_init); 3898
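/* A minimal sketch (illustrative only, hypothetical caller) of driving the
 * API above for an IPv4 next hop: neigh_xmit() resolves the cache entry,
 * queues the skb while resolution is still pending and transmits once the
 * link-layer address is known:
 *
 *	static int example_output(struct net_device *dev, __be32 nexthop,
 *				  struct sk_buff *skb)
 *	{
 *		return neigh_xmit(NEIGH_ARP_TABLE, dev, &nexthop, skb);
 *	}
 *
 * A caller that wants to drive resolution itself can instead take a
 * reference and kick the state machine; neigh_event_send() returns 0 once
 * the entry is usable:
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &nexthop, dev);
 *	char ha[MAX_ADDR_LEN];
 *
 *	if (n) {
 *		if (!neigh_event_send(n, NULL))
 *			neigh_ha_snapshot(ha, n, dev);
 *		neigh_release(n);
 *	}
 */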