// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	raw_spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	u64 cost;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size    != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	raw_spin_lock_init(&stab->lock);

	/* Make sure page count doesn't overflow. */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	err = bpf_map_charge_init(&stab->map.memory, cost);
	if (err)
		goto free_stab;

	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (stab->sks)
		return &stab->map;
	err = -ENOMEM;
	bpf_map_charge_finish(&stab->map.memory);
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	preempt_disable();
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	preempt_enable();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	struct sk_psock_link *link, *tmp;
	bool strp_stop = false;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct bpf_stab *stab = container_of(map, struct bpf_stab,
							     map);
			if (psock->parser.enabled && stab->progs.skb_parser)
				strp_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		sk_psock_stop_strp(sk, psock);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	struct proto *prot;

	sock_owned_by_me(sk);

	switch (sk->sk_type) {
	case SOCK_STREAM:
		prot = tcp_bpf_get_proto(sk, psock);
		break;

	case SOCK_DGRAM:
		prot = udp_bpf_get_proto(sk, psock);
		break;

	default:
		return -EINVAL;
	}

	if (IS_ERR(prot))
		return PTR_ERR(prot);

	sk_psock_update_proto(sk, psock, prot);
	return 0;
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
			 struct sock *sk)
{
	struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
	struct sk_psock *psock;
	bool skb_progs;
	int ret;

	skb_verdict = READ_ONCE(progs->skb_verdict);
	skb_parser = READ_ONCE(progs->skb_parser);
	skb_progs = skb_parser && skb_verdict;
	if (skb_progs) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict))
			return PTR_ERR(skb_verdict);
		skb_parser = bpf_prog_inc_not_zero(skb_parser);
		if (IS_ERR(skb_parser)) {
			bpf_prog_put(skb_verdict);
			return PTR_ERR(skb_parser);
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (skb_progs && READ_ONCE(psock->progs.skb_parser))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (!psock) {
			ret = -ENOMEM;
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);

	ret = sock_map_init_proto(sk, psock);
	if (ret < 0)
		goto out_drop;

	write_lock_bh(&sk->sk_callback_lock);
	if (skb_progs && !psock->parser.enabled) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			goto out_drop;
		}
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
		psock_set_prog(&psock->progs.skb_parser, skb_parser);
		sk_psock_start_strp(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_drop:
	sk_psock_put(sk, psock);
out_progs:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out:
	if (skb_progs) {
		bpf_prog_put(skb_verdict);
		bpf_prog_put(skb_parser);
	}
	return ret;
}

static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock *psock;
	int ret;

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock))
		return PTR_ERR(psock);

	if (!psock) {
		psock = sk_psock_init(sk, map->numa_node);
		if (!psock)
			return -ENOMEM;
	}

	ret = sock_map_init_proto(sk, psock);
	if (ret < 0)
		sk_psock_put(sk, psock);
	return ret;
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	kfree(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return __sock_map_lookup_elem(map, *(u32 *)key);
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	raw_spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	raw_spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

static bool sock_map_redirect_allowed(const struct sock *sk);

static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;
	if (inet_csk_has_ulp(sk))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	/* Only sockets we can redirect into/from in BPF need to hold
	 * refs to parser/verdict progs and have their sk_data_ready
	 * and sk_write_space callbacks overridden.
	 */
	if (sock_map_redirect_allowed(sk))
		ret = sock_map_link(map, &stab->progs, sk);
	else
		ret = sock_map_link_no_progs(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	raw_spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	raw_spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sk_is_tcp(const struct sock *sk)
{
	return sk->sk_type == SOCK_STREAM &&
	       sk->sk_protocol == IPPROTO_TCP;
}

static bool sk_is_udp(const struct sock *sk)
{
	return sk->sk_type == SOCK_DGRAM &&
	       sk->sk_protocol == IPPROTO_UDP;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	return sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return sk_is_tcp(sk) || sk_is_udp(sk);
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	else if (sk_is_udp(sk))
		return sk_hashed(sk);

	return false;
}

static int sock_map_update_elem(struct bpf_map *map, void *key,
				void *value, u64 flags)
{
	u32 idx = *(u32 *)key;
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else
		ret = sock_map_update_common(map, idx, sk, flags);
	sock_map_sk_release(sk);
out:
	fput(sock->file);
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	tcb->bpf.flags = flags;
	tcb->bpf.sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_map_ops sock_map_ops = {
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
};

struct bpf_htab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_htab_bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bpf_htab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_htab_bucket *sock_hash_select_bucket(struct bpf_htab *htab,
							u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_htab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_htab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_htab_bucket *bucket;
	struct bpf_htab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_htab *htab,
				struct bpf_htab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct bpf_htab_elem *elem_probe, *elem = link_raw;
	struct bpf_htab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	raw_spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
}

static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_htab_bucket *bucket;
	struct bpf_htab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	raw_spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab,
						  void *key, u32 key_size,
						  u32 hash, struct sock *sk,
						  struct bpf_htab_elem *old)
{
	struct bpf_htab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
			   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_htab_elem *elem, *elem_new;
	struct bpf_htab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (inet_csk_has_ulp(sk))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	/* Only sockets we can redirect into/from in BPF need to hold
	 * refs to parser/verdict progs and have their sk_data_ready
	 * and sk_write_space callbacks overridden.
	 */
	if (sock_map_redirect_allowed(sk))
		ret = sock_map_link(map, &htab->progs, sk);
	else
		ret = sock_map_link_no_progs(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	fput(sock->file);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct bpf_htab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
				     struct bpf_htab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					     struct bpf_htab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_htab *htab;
	int i, err;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size    == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_htab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	cost = (u64) htab->buckets_num * sizeof(struct bpf_htab_bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;
	if (cost >= U32_MAX - PAGE_SIZE) {
		err = -EINVAL;
		goto free_htab;
	}
	err = bpf_map_charge_init(&htab->map.memory, cost);
	if (err)
		goto free_htab;

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_htab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		bpf_map_charge_finish(&htab->map.memory);
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct bpf_htab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_htab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us to grab a socket ref too.
		 */
		raw_spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		raw_spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	return __sock_hash_lookup_elem(map, key);
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	tcb->bpf.flags = flags;
	tcb->bpf.sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_map_ops sock_hash_ops = {
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_hash_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_htab, map)->progs;
	default:
		break;
	}

	return NULL;
}

int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
			 struct bpf_prog *old, u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog **pprog;

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		pprog = &progs->msg_parser;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
		pprog = &progs->skb_parser;
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		pprog = &progs->skb_verdict;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (old)
		return psock_replace_prog(pprog, prog, old);

	psock_set_prog(pprog, prog);
	return 0;
}

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		if (sk->sk_prot->unhash)
			sk->sk_prot->unhash(sk);
		return;
	}

	saved_unhash = psock->saved_unhash;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	saved_unhash(sk);
}

void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		return sk->sk_prot->close(sk, timeout);
	}

	saved_close = psock->saved_close;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	release_sock(sk);
	saved_close(sk, timeout);
}
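
/* Usage sketch: the map_update_elem callbacks above are normally reached from
 * user space by writing a socket fd as the map value via the bpf(2) syscall.
 * The fragment below is a minimal, illustrative example only (it assumes
 * libbpf and a BPF_MAP_TYPE_SOCKMAP created with key_size = 4 and
 * value_size = 4); it is not part of this file's kernel API.
 *
 *	#include <bpf/bpf.h>
 *
 *	static int add_sock_to_map(int map_fd, __u32 idx, int sock_fd)
 *	{
 *		__u32 value = sock_fd;	// resolved by sockfd_lookup() above
 *
 *		// Rejected with EOPNOTSUPP unless the socket passes
 *		// sock_map_sk_is_suitable()/sock_map_sk_state_allowed(),
 *		// e.g. an ESTABLISHED or LISTEN TCP socket.
 *		return bpf_map_update_elem(map_fd, &idx, &value, BPF_ANY);
 *	}
 */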