// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>
#include <net/xdp.h>
#include <net/mptcp.h>

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
	if (in_compat_syscall()) {
		struct compat_sock_fprog f32;

		if (len != sizeof(f32))
			return -EINVAL;
		if (copy_from_sockptr(&f32, src, sizeof(f32)))
			return -EFAULT;
		memset(dst, 0, sizeof(*dst));
		dst->len = f32.len;
		dst->filter = compat_ptr(f32.filter);
	} else {
		if (len != sizeof(*dst))
			return -EINVAL;
		if (copy_from_sockptr(dst, src, sizeof(*dst)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
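
/* Illustrative note (added commentary, not part of the original file): the
 * compat branch above exists because the 32-bit ABI packs a 32-bit filter
 * pointer, roughly
 *
 *	struct compat_sock_fprog {
 *		u16		len;		// number of sock_filter insns
 *		compat_uptr_t	filter;		// 32-bit user pointer
 *	};
 *
 * while the native 64-bit struct sock_fprog carries a 64-bit pointer plus
 * padding, so the two layouts cannot be copied with one copy_from_sockptr().
 */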

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to bpf_prog_run. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}
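
/* Illustrative note (added commentary, not part of the original file): a
 * negative offset in the load helpers selects one of the classic BPF
 * virtual offset spaces, e.g. SKF_NET_OFF or SKF_LL_OFF, which
 * bpf_internal_load_pointer_neg_helper() resolves relative to the network
 * or link layer header instead of skb->data. A classic filter insn such as
 * "ldb [SKF_NET_OFF + 9]" therefore reads the IPv4 protocol byte no matter
 * how much link layer header precedes it.
 */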

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	__be16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	__be32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET);
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}
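
/* Illustrative sketch (added commentary, not part of the original file):
 * for SKF_AD_PKTTYPE the helper above emits, in eBPF pseudo assembly,
 * roughly
 *
 *	dst = *(u8 *)(src + PKT_TYPE_OFFSET)
 *	dst &= PKT_TYPE_MAX
 *	dst >>= 5		// big-endian bitfield layouts only
 *
 * i.e. it extracts the skb->pkt_type bitfield with plain byte loads and
 * masking, since eBPF has no bitfield access instruction.
 */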

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		if (offset)
			*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}
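
/* Illustrative sketch (added commentary, not part of the original file): a
 * classic "ldh [12]" (load the EtherType) passes through
 * convert_bpf_ld_abs() as a direct load plus a slow path, roughly
 *
 *	if (headlen - 12 >= 2)
 *		A = ntohs(*(u16 *)(skb->data + 12));	// fast path
 *	else
 *		A = bpf_skb_load_helper_16(skb, data, headlen, 12);
 *	if ((s32)A < 0)
 *		return 0;				// out of bounds
 *
 * with skb->data cached in BPF_REG_D and the linear headlen in BPF_REG_H
 * by the prologue emitted in bpf_convert_filter() below.
 */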

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *    bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) Second call to remap, itself done in two inner passes: the first
 *    finds the new jump offsets, the second does the actual remapping:
 *    bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;
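
		/* Illustrative note (added commentary): BPF_EMIT_JMP turns a
		 * classic target expressed as an instruction index into an
		 * eBPF pc-relative offset. If classic insn i jumps to classic
		 * insn t, and their remapped positions are addrs[i] and
		 * addrs[t], the emitted offset is
		 *
		 *	off = addrs[t] - addrs[i] - 1 - (insn - tmp_insns)
		 *
		 * where the extra terms account for pc already pointing past
		 * the jump and for the jump possibly not being the first
		 * insn emitted for this classic insn.
		 */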

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell if it was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
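
/* Illustrative example (added commentary, not part of the original file):
 * check_load_and_stores() would reject a filter such as
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),		// load mem[3]
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * because mem[3] is read without ever having been written, while the same
 * load preceded by BPF_STMT(BPF_ST, 3) passes the check.
 */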

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);
	int optmem_max = READ_ONCE(sysctl_optmem_max);

	/* same check as in sock_kmalloc() */
	if (filter_size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}
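
/* Illustrative note (added commentary, not part of the original file):
 * bpf_migrate_filter() below realizes the two-call protocol documented at
 * bpf_convert_filter(): one call with new_prog == NULL to size the eBPF
 * image, a bpf_prog_realloc() to make room for it, then a second call to
 * emit the remapped instructions into the enlarged program.
 */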

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it won't be used at
	 * this point in time anymore internally after the migration to the eBPF
	 * instruction representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the eBPF translation
	 * for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
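
/* Illustrative usage sketch (added commentary, not part of the original
 * file): a minimal in-kernel user of bpf_prog_create() could look like
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept all
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 *
 * followed by running it via bpf_prog_run() on an skb and releasing it
 * with bpf_prog_destroy().
 */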

/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from a user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
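
/* Illustrative note (added commentary, not part of the original file):
 * sk_attach_filter() is what ultimately services
 * setsockopt(SO_ATTACH_FILTER); a userspace caller typically does
 *
 *	struct sock_filter code[] = { ... };
 *	struct sock_fprog fprog = {
 *		.len	= ARRAY_SIZE(code),
 *		.filter	= code,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * while sk_attach_bpf() backs SO_ATTACH_BPF with an fd of an
 * already-loaded eBPF program.
 */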

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap). It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > INT_MAX))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
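
/* Illustrative usage sketch (added commentary, not part of the original
 * file): from a tc/BPF program, rewriting a packet byte range with
 * checksum upkeep would look roughly like
 *
 *	bpf_skb_store_bytes(skb, offset, buf, sizeof(buf),
 *			    BPF_F_RECOMPUTE_CSUM);
 *
 * where BPF_F_RECOMPUTE_CSUM keeps skb->csum valid for CHECKSUM_COMPLETE
 * skbs by pulling the old bytes out of the sum and pushing the new ones
 * in, as done above via __skb_postpull_rcsum()/__skb_postpush_rcsum().
 */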

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > INT_MAX))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_flow_dissector_load_bytes,
	   const struct bpf_flow_dissector *, ctx, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	if (unlikely(!ctx->skb))
		goto err_clear;

	ptr = skb_header_pointer(ctx->skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
	.func		= bpf_flow_dissector_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *end = skb_tail_pointer(skb);
	u8 *start, *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		if (unlikely(!skb_mac_header_was_set(skb)))
			goto err_clear;
		start = skb_mac_header(skb);
		break;
	case BPF_HDR_START_NET:
		start = skb_network_header(skb);
		break;
	default:
		goto err_clear;
	}

	ptr = start + offset;

	if (likely(ptr + len <= end)) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
	return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_sk_fullsock_proto = {
	.func		= bpf_sk_fullsock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

static inline int sk_skb_try_make_writable(struct sk_buff *skb,
					   unsigned int write_len)
{
	return __bpf_try_make_writable(skb, write_len);
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto sk_skb_pull_data_proto = {
	.func		= sk_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
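
/* Illustrative note (added commentary, not part of the original file): in
 * both csum_replace helpers the low bits of 'flags' encode the width of
 * the rewritten field: BPF_F_HDR_FIELD_MASK selects 0 (apply a raw diff,
 * e.g. one computed by bpf_csum_diff()), 2 (a 16-bit field) or 4 (a 32-bit
 * field). Updating the IPv4 header checksum after an address rewrite, for
 * instance, would use something like
 *
 *	bpf_l3_csum_replace(skb, ip_csum_off, old_addr, new_addr, 4);
 *
 * where ip_csum_off is a hypothetical offset of the IP checksum field.
 */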

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};
skb->csum_level : -EACCES;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_csum_level_proto = {
	.func		= bpf_csum_level,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb_nomtu(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb, false);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (dev_xmit_recursion()) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;
	skb_clear_tstamp(skb);

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);
	dev_xmit_recursion_dec();

	return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	unsigned int mlen = skb_network_offset(skb);

	if (unlikely(skb->len <= mlen)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	if (mlen) {
		__skb_pull(skb, mlen);
		if (unlikely(!skb->len)) {
			kfree_skb(skb);
			return -ERANGE;
		}

		/* At ingress, the mac header has already been pulled once.
		 * At egress, skb_postpull_rcsum() has to be done in case
		 * the skb originated from ingress (i.e. a forwarded skb)
		 * to ensure that rcsum starts at the net header.
		 */
		if (!skb_at_tc_ingress(skb))
			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	}
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
2163 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 2164 } 2165 2166 static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, 2167 u32 flags) 2168 { 2169 if (dev_is_mac_header_xmit(dev)) 2170 return __bpf_redirect_common(skb, dev, flags); 2171 else 2172 return __bpf_redirect_no_mac(skb, dev, flags); 2173 } 2174 2175 #if IS_ENABLED(CONFIG_IPV6) 2176 static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, 2177 struct net_device *dev, struct bpf_nh_params *nh) 2178 { 2179 u32 hh_len = LL_RESERVED_SPACE(dev); 2180 const struct in6_addr *nexthop; 2181 struct dst_entry *dst = NULL; 2182 struct neighbour *neigh; 2183 2184 if (dev_xmit_recursion()) { 2185 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2186 goto out_drop; 2187 } 2188 2189 skb->dev = dev; 2190 skb_clear_tstamp(skb); 2191 2192 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2193 skb = skb_expand_head(skb, hh_len); 2194 if (!skb) 2195 return -ENOMEM; 2196 } 2197 2198 rcu_read_lock_bh(); 2199 if (!nh) { 2200 dst = skb_dst(skb); 2201 nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst), 2202 &ipv6_hdr(skb)->daddr); 2203 } else { 2204 nexthop = &nh->ipv6_nh; 2205 } 2206 neigh = ip_neigh_gw6(dev, nexthop); 2207 if (likely(!IS_ERR(neigh))) { 2208 int ret; 2209 2210 sock_confirm_neigh(skb, neigh); 2211 dev_xmit_recursion_inc(); 2212 ret = neigh_output(neigh, skb, false); 2213 dev_xmit_recursion_dec(); 2214 rcu_read_unlock_bh(); 2215 return ret; 2216 } 2217 rcu_read_unlock_bh(); 2218 if (dst) 2219 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 2220 out_drop: 2221 kfree_skb(skb); 2222 return -ENETDOWN; 2223 } 2224 2225 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2226 struct bpf_nh_params *nh) 2227 { 2228 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 2229 struct net *net = dev_net(dev); 2230 int err, ret = NET_XMIT_DROP; 2231 2232 if (!nh) { 2233 struct dst_entry *dst; 2234 struct flowi6 fl6 = { 2235 .flowi6_flags = FLOWI_FLAG_ANYSRC, 2236 .flowi6_mark = skb->mark, 2237 .flowlabel = ip6_flowinfo(ip6h), 2238 .flowi6_oif = dev->ifindex, 2239 .flowi6_proto = ip6h->nexthdr, 2240 .daddr = ip6h->daddr, 2241 .saddr = ip6h->saddr, 2242 }; 2243 2244 dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); 2245 if (IS_ERR(dst)) 2246 goto out_drop; 2247 2248 skb_dst_set(skb, dst); 2249 } else if (nh->nh_family != AF_INET6) { 2250 goto out_drop; 2251 } 2252 2253 err = bpf_out_neigh_v6(net, skb, dev, nh); 2254 if (unlikely(net_xmit_eval(err))) 2255 dev->stats.tx_errors++; 2256 else 2257 ret = NET_XMIT_SUCCESS; 2258 goto out_xmit; 2259 out_drop: 2260 dev->stats.tx_errors++; 2261 kfree_skb(skb); 2262 out_xmit: 2263 return ret; 2264 } 2265 #else 2266 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, 2267 struct bpf_nh_params *nh) 2268 { 2269 kfree_skb(skb); 2270 return NET_XMIT_DROP; 2271 } 2272 #endif /* CONFIG_IPV6 */ 2273 2274 #if IS_ENABLED(CONFIG_INET) 2275 static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, 2276 struct net_device *dev, struct bpf_nh_params *nh) 2277 { 2278 u32 hh_len = LL_RESERVED_SPACE(dev); 2279 struct neighbour *neigh; 2280 bool is_v6gw = false; 2281 2282 if (dev_xmit_recursion()) { 2283 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); 2284 goto out_drop; 2285 } 2286 2287 skb->dev = dev; 2288 skb_clear_tstamp(skb); 2289 2290 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 2291 skb = 
skb_expand_head(skb, hh_len); 2292 if (!skb) 2293 return -ENOMEM; 2294 } 2295 2296 rcu_read_lock_bh(); 2297 if (!nh) { 2298 struct dst_entry *dst = skb_dst(skb); 2299 struct rtable *rt = container_of(dst, struct rtable, dst); 2300 2301 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); 2302 } else if (nh->nh_family == AF_INET6) { 2303 neigh = ip_neigh_gw6(dev, &nh->ipv6_nh); 2304 is_v6gw = true; 2305 } else if (nh->nh_family == AF_INET) { 2306 neigh = ip_neigh_gw4(dev, nh->ipv4_nh); 2307 } else { 2308 rcu_read_unlock_bh(); 2309 goto out_drop; 2310 } 2311 2312 if (likely(!IS_ERR(neigh))) { 2313 int ret; 2314 2315 sock_confirm_neigh(skb, neigh); 2316 dev_xmit_recursion_inc(); 2317 ret = neigh_output(neigh, skb, is_v6gw); 2318 dev_xmit_recursion_dec(); 2319 rcu_read_unlock_bh(); 2320 return ret; 2321 } 2322 rcu_read_unlock_bh(); 2323 out_drop: 2324 kfree_skb(skb); 2325 return -ENETDOWN; 2326 } 2327 2328 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2329 struct bpf_nh_params *nh) 2330 { 2331 const struct iphdr *ip4h = ip_hdr(skb); 2332 struct net *net = dev_net(dev); 2333 int err, ret = NET_XMIT_DROP; 2334 2335 if (!nh) { 2336 struct flowi4 fl4 = { 2337 .flowi4_flags = FLOWI_FLAG_ANYSRC, 2338 .flowi4_mark = skb->mark, 2339 .flowi4_tos = RT_TOS(ip4h->tos), 2340 .flowi4_oif = dev->ifindex, 2341 .flowi4_proto = ip4h->protocol, 2342 .daddr = ip4h->daddr, 2343 .saddr = ip4h->saddr, 2344 }; 2345 struct rtable *rt; 2346 2347 rt = ip_route_output_flow(net, &fl4, NULL); 2348 if (IS_ERR(rt)) 2349 goto out_drop; 2350 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { 2351 ip_rt_put(rt); 2352 goto out_drop; 2353 } 2354 2355 skb_dst_set(skb, &rt->dst); 2356 } 2357 2358 err = bpf_out_neigh_v4(net, skb, dev, nh); 2359 if (unlikely(net_xmit_eval(err))) 2360 dev->stats.tx_errors++; 2361 else 2362 ret = NET_XMIT_SUCCESS; 2363 goto out_xmit; 2364 out_drop: 2365 dev->stats.tx_errors++; 2366 kfree_skb(skb); 2367 out_xmit: 2368 return ret; 2369 } 2370 #else 2371 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, 2372 struct bpf_nh_params *nh) 2373 { 2374 kfree_skb(skb); 2375 return NET_XMIT_DROP; 2376 } 2377 #endif /* CONFIG_INET */ 2378 2379 static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev, 2380 struct bpf_nh_params *nh) 2381 { 2382 struct ethhdr *ethh = eth_hdr(skb); 2383 2384 if (unlikely(skb->mac_header >= skb->network_header)) 2385 goto out; 2386 bpf_push_mac_rcsum(skb); 2387 if (is_multicast_ether_addr(ethh->h_dest)) 2388 goto out; 2389 2390 skb_pull(skb, sizeof(*ethh)); 2391 skb_unset_mac_header(skb); 2392 skb_reset_network_header(skb); 2393 2394 if (skb->protocol == htons(ETH_P_IP)) 2395 return __bpf_redirect_neigh_v4(skb, dev, nh); 2396 else if (skb->protocol == htons(ETH_P_IPV6)) 2397 return __bpf_redirect_neigh_v6(skb, dev, nh); 2398 out: 2399 kfree_skb(skb); 2400 return -ENOTSUPP; 2401 } 2402 2403 /* Internal, non-exposed redirect flags. 
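 * They are set by the redirect helpers below and consumed by
 * skb_do_redirect(): BPF_F_NEIGH resolves the L2 address via the
 * neighbour subsystem, BPF_F_PEER crosses over to the peer device in
 * another netns, and BPF_F_NEXTHOP indicates that ri->nh carries a
 * caller-supplied nexthop. They must not overlap with user-visible
 * flags such as BPF_F_INGRESS.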
*/ 2404 enum { 2405 BPF_F_NEIGH = (1ULL << 1), 2406 BPF_F_PEER = (1ULL << 2), 2407 BPF_F_NEXTHOP = (1ULL << 3), 2408 #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) 2409 }; 2410 2411 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) 2412 { 2413 struct net_device *dev; 2414 struct sk_buff *clone; 2415 int ret; 2416 2417 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2418 return -EINVAL; 2419 2420 dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); 2421 if (unlikely(!dev)) 2422 return -EINVAL; 2423 2424 clone = skb_clone(skb, GFP_ATOMIC); 2425 if (unlikely(!clone)) 2426 return -ENOMEM; 2427 2428 /* For direct write, we need to keep the invariant that the skbs 2429 * we're dealing with need to be uncloned. Should uncloning fail 2430 * here, we need to free the just generated clone to unclone once 2431 * again. 2432 */ 2433 ret = bpf_try_make_head_writable(skb); 2434 if (unlikely(ret)) { 2435 kfree_skb(clone); 2436 return -ENOMEM; 2437 } 2438 2439 return __bpf_redirect(clone, dev, flags); 2440 } 2441 2442 static const struct bpf_func_proto bpf_clone_redirect_proto = { 2443 .func = bpf_clone_redirect, 2444 .gpl_only = false, 2445 .ret_type = RET_INTEGER, 2446 .arg1_type = ARG_PTR_TO_CTX, 2447 .arg2_type = ARG_ANYTHING, 2448 .arg3_type = ARG_ANYTHING, 2449 }; 2450 2451 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); 2452 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); 2453 2454 int skb_do_redirect(struct sk_buff *skb) 2455 { 2456 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2457 struct net *net = dev_net(skb->dev); 2458 struct net_device *dev; 2459 u32 flags = ri->flags; 2460 2461 dev = dev_get_by_index_rcu(net, ri->tgt_index); 2462 ri->tgt_index = 0; 2463 ri->flags = 0; 2464 if (unlikely(!dev)) 2465 goto out_drop; 2466 if (flags & BPF_F_PEER) { 2467 const struct net_device_ops *ops = dev->netdev_ops; 2468 2469 if (unlikely(!ops->ndo_get_peer_dev || 2470 !skb_at_tc_ingress(skb))) 2471 goto out_drop; 2472 dev = ops->ndo_get_peer_dev(dev); 2473 if (unlikely(!dev || 2474 !(dev->flags & IFF_UP) || 2475 net_eq(net, dev_net(dev)))) 2476 goto out_drop; 2477 skb->dev = dev; 2478 return -EAGAIN; 2479 } 2480 return flags & BPF_F_NEIGH ? 2481 __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ? 
2482 &ri->nh : NULL) : 2483 __bpf_redirect(skb, dev, flags); 2484 out_drop: 2485 kfree_skb(skb); 2486 return -EINVAL; 2487 } 2488 2489 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) 2490 { 2491 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2492 2493 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2494 return TC_ACT_SHOT; 2495 2496 ri->flags = flags; 2497 ri->tgt_index = ifindex; 2498 2499 return TC_ACT_REDIRECT; 2500 } 2501 2502 static const struct bpf_func_proto bpf_redirect_proto = { 2503 .func = bpf_redirect, 2504 .gpl_only = false, 2505 .ret_type = RET_INTEGER, 2506 .arg1_type = ARG_ANYTHING, 2507 .arg2_type = ARG_ANYTHING, 2508 }; 2509 2510 BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags) 2511 { 2512 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2513 2514 if (unlikely(flags)) 2515 return TC_ACT_SHOT; 2516 2517 ri->flags = BPF_F_PEER; 2518 ri->tgt_index = ifindex; 2519 2520 return TC_ACT_REDIRECT; 2521 } 2522 2523 static const struct bpf_func_proto bpf_redirect_peer_proto = { 2524 .func = bpf_redirect_peer, 2525 .gpl_only = false, 2526 .ret_type = RET_INTEGER, 2527 .arg1_type = ARG_ANYTHING, 2528 .arg2_type = ARG_ANYTHING, 2529 }; 2530 2531 BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params, 2532 int, plen, u64, flags) 2533 { 2534 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 2535 2536 if (unlikely((plen && plen < sizeof(*params)) || flags)) 2537 return TC_ACT_SHOT; 2538 2539 ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0); 2540 ri->tgt_index = ifindex; 2541 2542 BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params)); 2543 if (plen) 2544 memcpy(&ri->nh, params, sizeof(ri->nh)); 2545 2546 return TC_ACT_REDIRECT; 2547 } 2548 2549 static const struct bpf_func_proto bpf_redirect_neigh_proto = { 2550 .func = bpf_redirect_neigh, 2551 .gpl_only = false, 2552 .ret_type = RET_INTEGER, 2553 .arg1_type = ARG_ANYTHING, 2554 .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 2555 .arg3_type = ARG_CONST_SIZE_OR_ZERO, 2556 .arg4_type = ARG_ANYTHING, 2557 }; 2558 2559 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) 2560 { 2561 msg->apply_bytes = bytes; 2562 return 0; 2563 } 2564 2565 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { 2566 .func = bpf_msg_apply_bytes, 2567 .gpl_only = false, 2568 .ret_type = RET_INTEGER, 2569 .arg1_type = ARG_PTR_TO_CTX, 2570 .arg2_type = ARG_ANYTHING, 2571 }; 2572 2573 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) 2574 { 2575 msg->cork_bytes = bytes; 2576 return 0; 2577 } 2578 2579 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { 2580 .func = bpf_msg_cork_bytes, 2581 .gpl_only = false, 2582 .ret_type = RET_INTEGER, 2583 .arg1_type = ARG_PTR_TO_CTX, 2584 .arg2_type = ARG_ANYTHING, 2585 }; 2586 2587 BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, 2588 u32, end, u64, flags) 2589 { 2590 u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; 2591 u32 first_sge, last_sge, i, shift, bytes_sg_total; 2592 struct scatterlist *sge; 2593 u8 *raw, *to, *from; 2594 struct page *page; 2595 2596 if (unlikely(flags || end <= start)) 2597 return -EINVAL; 2598 2599 /* First find the starting scatterlist element */ 2600 i = msg->sg.start; 2601 do { 2602 offset += len; 2603 len = sk_msg_elem(msg, i)->length; 2604 if (start < offset + len) 2605 break; 2606 sk_msg_iter_var_next(i); 2607 } while (i != msg->sg.end); 2608 2609 if (unlikely(start >= 
offset + len)) 2610 return -EINVAL; 2611 2612 first_sge = i; 2613 /* The start may point into the sg element so we need to also 2614 * account for the headroom. 2615 */ 2616 bytes_sg_total = start - offset + bytes; 2617 if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len) 2618 goto out; 2619 2620 /* At this point we need to linearize multiple scatterlist 2621 * elements or a single shared page. Either way we need to 2622 * copy into a linear buffer exclusively owned by BPF. Then 2623 * place the buffer in the scatterlist and fixup the original 2624 * entries by removing the entries now in the linear buffer 2625 * and shifting the remaining entries. For now we do not try 2626 * to copy partial entries to avoid complexity of running out 2627 * of sg_entry slots. The downside is reading a single byte 2628 * will copy the entire sg entry. 2629 */ 2630 do { 2631 copy += sk_msg_elem(msg, i)->length; 2632 sk_msg_iter_var_next(i); 2633 if (bytes_sg_total <= copy) 2634 break; 2635 } while (i != msg->sg.end); 2636 last_sge = i; 2637 2638 if (unlikely(bytes_sg_total > copy)) 2639 return -EINVAL; 2640 2641 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, 2642 get_order(copy)); 2643 if (unlikely(!page)) 2644 return -ENOMEM; 2645 2646 raw = page_address(page); 2647 i = first_sge; 2648 do { 2649 sge = sk_msg_elem(msg, i); 2650 from = sg_virt(sge); 2651 len = sge->length; 2652 to = raw + poffset; 2653 2654 memcpy(to, from, len); 2655 poffset += len; 2656 sge->length = 0; 2657 put_page(sg_page(sge)); 2658 2659 sk_msg_iter_var_next(i); 2660 } while (i != last_sge); 2661 2662 sg_set_page(&msg->sg.data[first_sge], page, copy, 0); 2663 2664 /* To repair sg ring we need to shift entries. If we only 2665 * had a single entry though we can just replace it and 2666 * be done. Otherwise walk the ring and shift the entries. 2667 */ 2668 WARN_ON_ONCE(last_sge == first_sge); 2669 shift = last_sge > first_sge ? 2670 last_sge - first_sge - 1 : 2671 NR_MSG_FRAG_IDS - first_sge + last_sge - 1; 2672 if (!shift) 2673 goto out; 2674 2675 i = first_sge; 2676 sk_msg_iter_var_next(i); 2677 do { 2678 u32 move_from; 2679 2680 if (i + shift >= NR_MSG_FRAG_IDS) 2681 move_from = i + shift - NR_MSG_FRAG_IDS; 2682 else 2683 move_from = i + shift; 2684 if (move_from == msg->sg.end) 2685 break; 2686 2687 msg->sg.data[i] = msg->sg.data[move_from]; 2688 msg->sg.data[move_from].length = 0; 2689 msg->sg.data[move_from].page_link = 0; 2690 msg->sg.data[move_from].offset = 0; 2691 sk_msg_iter_var_next(i); 2692 } while (1); 2693 2694 msg->sg.end = msg->sg.end - shift > msg->sg.end ? 
msg->sg.end - shift + NR_MSG_FRAG_IDS :
		      msg->sg.end - shift;
out:
	msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
	msg->data_end = msg->data + bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
	.func		= bpf_msg_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
	   u32, len, u64, flags)
{
	struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
	u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
	u8 *raw, *to, *from;
	struct page *page;

	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(len == 0))
		return 0;

	/* First find the starting scatterlist element */
	i = msg->sg.start;
	do {
		offset += l;
		l = sk_msg_elem(msg, i)->length;

		if (start < offset + l)
			break;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);

	if (start >= offset + l)
		return -EINVAL;

	space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

	/* If no space is available, we will fall back to a copy. We need
	 * at least one scatterlist elem available to push data into when
	 * start aligns to the beginning of an element, or two when it
	 * falls inside an element. We handle the start equals offset case
	 * separately because it's the common case for inserting a header.
	 */
	if (!space || (space == 1 && start != offset))
		copy = msg->sg.data[i].length;

	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
			   get_order(copy + len));
	if (unlikely(!page))
		return -ENOMEM;

	if (copy) {
		int front, back;

		raw = page_address(page);

		psge = sk_msg_elem(msg, i);
		front = start - offset;
		back = psge->length - front;
		from = sg_virt(psge);

		if (front)
			memcpy(raw, from, front);

		if (back) {
			from += front;
			to = raw + front + len;

			memcpy(to, from, back);
		}

		put_page(sg_page(psge));
	} else if (start - offset) {
		psge = sk_msg_elem(msg, i);
		rsge = sk_msg_elem_cpy(msg, i);

		psge->length = start - offset;
		rsge.length -= psge->length;
		rsge.offset += start;

		sk_msg_iter_var_next(i);
		sg_unmark_end(psge);
		sg_unmark_end(&rsge);
		sk_msg_iter_next(msg, end);
	}

	/* Slot(s) to place newly allocated data */
	new = i;

	/* Shift one or two slots as needed */
	if (!copy) {
		sge = sk_msg_elem_cpy(msg, i);

		sk_msg_iter_var_next(i);
		sg_unmark_end(&sge);
		sk_msg_iter_next(msg, end);

		nsge = sk_msg_elem_cpy(msg, i);
		if (rsge.length) {
			sk_msg_iter_var_next(i);
			nnsge = sk_msg_elem_cpy(msg, i);
		}

		while (i != msg->sg.end) {
			msg->sg.data[i] = sge;
			sge = nsge;
			sk_msg_iter_var_next(i);
			if (rsge.length) {
				nsge = nnsge;
				nnsge = sk_msg_elem_cpy(msg, i);
			} else {
				nsge = sk_msg_elem_cpy(msg, i);
			}
		}
	}

	/* Place newly allocated data buffer */
	sk_mem_charge(msg->sk, len);
	msg->sg.size += len;
	__clear_bit(new, msg->sg.copy);
	sg_set_page(&msg->sg.data[new], page, len + copy, 0);
	if (rsge.length) {
		get_page(sg_page(&rsge));
		sk_msg_iter_var_next(new);
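		/* The remainder of the split element is re-linked in the
		 * slot right after the newly allocated page.
		 */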
msg->sg.data[new] = rsge;
	}

	sk_msg_compute_data_pointers(msg);
	return 0;
}

static const struct bpf_func_proto bpf_msg_push_data_proto = {
	.func		= bpf_msg_push_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

static void sk_msg_shift_left(struct sk_msg *msg, int i)
{
	int prev;

	do {
		prev = i;
		sk_msg_iter_var_next(i);
		msg->sg.data[prev] = msg->sg.data[i];
	} while (i != msg->sg.end);

	sk_msg_iter_prev(msg, end);
}

static void sk_msg_shift_right(struct sk_msg *msg, int i)
{
	struct scatterlist tmp, sge;

	sk_msg_iter_next(msg, end);
	sge = sk_msg_elem_cpy(msg, i);
	sk_msg_iter_var_next(i);
	tmp = sk_msg_elem_cpy(msg, i);

	while (i != msg->sg.end) {
		msg->sg.data[i] = sge;
		sk_msg_iter_var_next(i);
		sge = tmp;
		tmp = sk_msg_elem_cpy(msg, i);
	}
}

BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
	   u32, len, u64, flags)
{
	u32 i = 0, l = 0, space, offset = 0;
	u64 last = start + len;
	int pop;

	if (unlikely(flags))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg.start;
	do {
		offset += l;
		l = sk_msg_elem(msg, i)->length;

		if (start < offset + l)
			break;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);

	/* Bounds checks: start and pop must be inside message */
	if (start >= offset + l || last >= msg->sg.size)
		return -EINVAL;

	space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

	pop = len;
	/* --------------| offset
	 * -| start      |--------- len -------|
	 *
	 *  |----- a ----|-------- pop -------|----- b ----|
	 *  |______________________________________________| length
	 *
	 *
	 * a:   region at front of scatter element to save
	 * b:   region at back of scatter element to save when length > a + pop
	 * pop: region to pop from element, same as input 'pop'; it is
	 *      decremented per iteration below.
	 *
	 * Two top-level cases to handle when start != offset: first, b is
	 * non-zero; second, b is zero, which corresponds to a pop spanning
	 * more than one element.
	 *
	 * Then, if b is non-zero and there is no space, allocate space and
	 * compact the a and b regions into a page. If there is space, shift
	 * the ring to the right, freeing the next element in the ring to
	 * place b, leaving a untouched except to reduce its length.
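	 *
	 * A worked example (illustrative numbers only): popping len = 30
	 * at start = 20 from a single 100-byte element with offset = 0
	 * yields a = 20, pop = 30 and b = 50; the element keeps bytes
	 * [0,20) and [50,100), shrinking the message from 100 to 70 bytes.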
2926 */ 2927 if (start != offset) { 2928 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); 2929 int a = start; 2930 int b = sge->length - pop - a; 2931 2932 sk_msg_iter_var_next(i); 2933 2934 if (pop < sge->length - a) { 2935 if (space) { 2936 sge->length = a; 2937 sk_msg_shift_right(msg, i); 2938 nsge = sk_msg_elem(msg, i); 2939 get_page(sg_page(sge)); 2940 sg_set_page(nsge, 2941 sg_page(sge), 2942 b, sge->offset + pop + a); 2943 } else { 2944 struct page *page, *orig; 2945 u8 *to, *from; 2946 2947 page = alloc_pages(__GFP_NOWARN | 2948 __GFP_COMP | GFP_ATOMIC, 2949 get_order(a + b)); 2950 if (unlikely(!page)) 2951 return -ENOMEM; 2952 2953 sge->length = a; 2954 orig = sg_page(sge); 2955 from = sg_virt(sge); 2956 to = page_address(page); 2957 memcpy(to, from, a); 2958 memcpy(to + a, from + a + pop, b); 2959 sg_set_page(sge, page, a + b, 0); 2960 put_page(orig); 2961 } 2962 pop = 0; 2963 } else if (pop >= sge->length - a) { 2964 pop -= (sge->length - a); 2965 sge->length = a; 2966 } 2967 } 2968 2969 /* From above the current layout _must_ be as follows, 2970 * 2971 * -| offset 2972 * -| start 2973 * 2974 * |---- pop ---|---------------- b ------------| 2975 * |____________________________________________| length 2976 * 2977 * Offset and start of the current msg elem are equal because in the 2978 * previous case we handled offset != start and either consumed the 2979 * entire element and advanced to the next element OR pop == 0. 2980 * 2981 * Two cases to handle here are first pop is less than the length 2982 * leaving some remainder b above. Simply adjust the element's layout 2983 * in this case. Or pop >= length of the element so that b = 0. In this 2984 * case advance to next element decrementing pop. 2985 */ 2986 while (pop) { 2987 struct scatterlist *sge = sk_msg_elem(msg, i); 2988 2989 if (pop < sge->length) { 2990 sge->length -= pop; 2991 sge->offset += pop; 2992 pop = 0; 2993 } else { 2994 pop -= sge->length; 2995 sk_msg_shift_left(msg, i); 2996 } 2997 sk_msg_iter_var_next(i); 2998 } 2999 3000 sk_mem_uncharge(msg->sk, len - pop); 3001 msg->sg.size -= (len - pop); 3002 sk_msg_compute_data_pointers(msg); 3003 return 0; 3004 } 3005 3006 static const struct bpf_func_proto bpf_msg_pop_data_proto = { 3007 .func = bpf_msg_pop_data, 3008 .gpl_only = false, 3009 .ret_type = RET_INTEGER, 3010 .arg1_type = ARG_PTR_TO_CTX, 3011 .arg2_type = ARG_ANYTHING, 3012 .arg3_type = ARG_ANYTHING, 3013 .arg4_type = ARG_ANYTHING, 3014 }; 3015 3016 #ifdef CONFIG_CGROUP_NET_CLASSID 3017 BPF_CALL_0(bpf_get_cgroup_classid_curr) 3018 { 3019 return __task_get_classid(current); 3020 } 3021 3022 static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = { 3023 .func = bpf_get_cgroup_classid_curr, 3024 .gpl_only = false, 3025 .ret_type = RET_INTEGER, 3026 }; 3027 3028 BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb) 3029 { 3030 struct sock *sk = skb_to_full_sk(skb); 3031 3032 if (!sk || !sk_fullsock(sk)) 3033 return 0; 3034 3035 return sock_cgroup_classid(&sk->sk_cgrp_data); 3036 } 3037 3038 static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = { 3039 .func = bpf_skb_cgroup_classid, 3040 .gpl_only = false, 3041 .ret_type = RET_INTEGER, 3042 .arg1_type = ARG_PTR_TO_CTX, 3043 }; 3044 #endif 3045 3046 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) 3047 { 3048 return task_get_classid(skb); 3049 } 3050 3051 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { 3052 .func = bpf_get_cgroup_classid, 3053 .gpl_only = false, 3054 .ret_type = 
RET_INTEGER, 3055 .arg1_type = ARG_PTR_TO_CTX, 3056 }; 3057 3058 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) 3059 { 3060 return dst_tclassid(skb); 3061 } 3062 3063 static const struct bpf_func_proto bpf_get_route_realm_proto = { 3064 .func = bpf_get_route_realm, 3065 .gpl_only = false, 3066 .ret_type = RET_INTEGER, 3067 .arg1_type = ARG_PTR_TO_CTX, 3068 }; 3069 3070 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) 3071 { 3072 /* If skb_clear_hash() was called due to mangling, we can 3073 * trigger SW recalculation here. Later access to hash 3074 * can then use the inline skb->hash via context directly 3075 * instead of calling this helper again. 3076 */ 3077 return skb_get_hash(skb); 3078 } 3079 3080 static const struct bpf_func_proto bpf_get_hash_recalc_proto = { 3081 .func = bpf_get_hash_recalc, 3082 .gpl_only = false, 3083 .ret_type = RET_INTEGER, 3084 .arg1_type = ARG_PTR_TO_CTX, 3085 }; 3086 3087 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) 3088 { 3089 /* After all direct packet write, this can be used once for 3090 * triggering a lazy recalc on next skb_get_hash() invocation. 3091 */ 3092 skb_clear_hash(skb); 3093 return 0; 3094 } 3095 3096 static const struct bpf_func_proto bpf_set_hash_invalid_proto = { 3097 .func = bpf_set_hash_invalid, 3098 .gpl_only = false, 3099 .ret_type = RET_INTEGER, 3100 .arg1_type = ARG_PTR_TO_CTX, 3101 }; 3102 3103 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) 3104 { 3105 /* Set user specified hash as L4(+), so that it gets returned 3106 * on skb_get_hash() call unless BPF prog later on triggers a 3107 * skb_clear_hash(). 3108 */ 3109 __skb_set_sw_hash(skb, hash, true); 3110 return 0; 3111 } 3112 3113 static const struct bpf_func_proto bpf_set_hash_proto = { 3114 .func = bpf_set_hash, 3115 .gpl_only = false, 3116 .ret_type = RET_INTEGER, 3117 .arg1_type = ARG_PTR_TO_CTX, 3118 .arg2_type = ARG_ANYTHING, 3119 }; 3120 3121 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, 3122 u16, vlan_tci) 3123 { 3124 int ret; 3125 3126 if (unlikely(vlan_proto != htons(ETH_P_8021Q) && 3127 vlan_proto != htons(ETH_P_8021AD))) 3128 vlan_proto = htons(ETH_P_8021Q); 3129 3130 bpf_push_mac_rcsum(skb); 3131 ret = skb_vlan_push(skb, vlan_proto, vlan_tci); 3132 bpf_pull_mac_rcsum(skb); 3133 3134 bpf_compute_data_pointers(skb); 3135 return ret; 3136 } 3137 3138 static const struct bpf_func_proto bpf_skb_vlan_push_proto = { 3139 .func = bpf_skb_vlan_push, 3140 .gpl_only = false, 3141 .ret_type = RET_INTEGER, 3142 .arg1_type = ARG_PTR_TO_CTX, 3143 .arg2_type = ARG_ANYTHING, 3144 .arg3_type = ARG_ANYTHING, 3145 }; 3146 3147 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) 3148 { 3149 int ret; 3150 3151 bpf_push_mac_rcsum(skb); 3152 ret = skb_vlan_pop(skb); 3153 bpf_pull_mac_rcsum(skb); 3154 3155 bpf_compute_data_pointers(skb); 3156 return ret; 3157 } 3158 3159 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { 3160 .func = bpf_skb_vlan_pop, 3161 .gpl_only = false, 3162 .ret_type = RET_INTEGER, 3163 .arg1_type = ARG_PTR_TO_CTX, 3164 }; 3165 3166 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) 3167 { 3168 /* Caller already did skb_cow() with len as headroom, 3169 * so no need to do it here. 
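	 * The memmove() below relocates the first 'off' bytes (the mac
	 * header) to the new head, and the 'len' freshly exposed bytes
	 * at offset 'off' are zeroed, opening a gap between the mac and
	 * network headers for the caller to fill.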
3170 */ 3171 skb_push(skb, len); 3172 memmove(skb->data, skb->data + len, off); 3173 memset(skb->data + off, 0, len); 3174 3175 /* No skb_postpush_rcsum(skb, skb->data + off, len) 3176 * needed here as it does not change the skb->csum 3177 * result for checksum complete when summing over 3178 * zeroed blocks. 3179 */ 3180 return 0; 3181 } 3182 3183 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) 3184 { 3185 void *old_data; 3186 3187 /* skb_ensure_writable() is not needed here, as we're 3188 * already working on an uncloned skb. 3189 */ 3190 if (unlikely(!pskb_may_pull(skb, off + len))) 3191 return -ENOMEM; 3192 3193 old_data = skb->data; 3194 __skb_pull(skb, len); 3195 skb_postpull_rcsum(skb, old_data + off, len); 3196 memmove(skb->data, old_data, off); 3197 3198 return 0; 3199 } 3200 3201 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) 3202 { 3203 bool trans_same = skb->transport_header == skb->network_header; 3204 int ret; 3205 3206 /* There's no need for __skb_push()/__skb_pull() pair to 3207 * get to the start of the mac header as we're guaranteed 3208 * to always start from here under eBPF. 3209 */ 3210 ret = bpf_skb_generic_push(skb, off, len); 3211 if (likely(!ret)) { 3212 skb->mac_header -= len; 3213 skb->network_header -= len; 3214 if (trans_same) 3215 skb->transport_header = skb->network_header; 3216 } 3217 3218 return ret; 3219 } 3220 3221 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) 3222 { 3223 bool trans_same = skb->transport_header == skb->network_header; 3224 int ret; 3225 3226 /* Same here, __skb_push()/__skb_pull() pair not needed. */ 3227 ret = bpf_skb_generic_pop(skb, off, len); 3228 if (likely(!ret)) { 3229 skb->mac_header += len; 3230 skb->network_header += len; 3231 if (trans_same) 3232 skb->transport_header = skb->network_header; 3233 } 3234 3235 return ret; 3236 } 3237 3238 static int bpf_skb_proto_4_to_6(struct sk_buff *skb) 3239 { 3240 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3241 u32 off = skb_mac_header_len(skb); 3242 int ret; 3243 3244 ret = skb_cow(skb, len_diff); 3245 if (unlikely(ret < 0)) 3246 return ret; 3247 3248 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3249 if (unlikely(ret < 0)) 3250 return ret; 3251 3252 if (skb_is_gso(skb)) { 3253 struct skb_shared_info *shinfo = skb_shinfo(skb); 3254 3255 /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */ 3256 if (shinfo->gso_type & SKB_GSO_TCPV4) { 3257 shinfo->gso_type &= ~SKB_GSO_TCPV4; 3258 shinfo->gso_type |= SKB_GSO_TCPV6; 3259 } 3260 } 3261 3262 skb->protocol = htons(ETH_P_IPV6); 3263 skb_clear_hash(skb); 3264 3265 return 0; 3266 } 3267 3268 static int bpf_skb_proto_6_to_4(struct sk_buff *skb) 3269 { 3270 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); 3271 u32 off = skb_mac_header_len(skb); 3272 int ret; 3273 3274 ret = skb_unclone(skb, GFP_ATOMIC); 3275 if (unlikely(ret < 0)) 3276 return ret; 3277 3278 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3279 if (unlikely(ret < 0)) 3280 return ret; 3281 3282 if (skb_is_gso(skb)) { 3283 struct skb_shared_info *shinfo = skb_shinfo(skb); 3284 3285 /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. 
*/ 3286 if (shinfo->gso_type & SKB_GSO_TCPV6) { 3287 shinfo->gso_type &= ~SKB_GSO_TCPV6; 3288 shinfo->gso_type |= SKB_GSO_TCPV4; 3289 } 3290 } 3291 3292 skb->protocol = htons(ETH_P_IP); 3293 skb_clear_hash(skb); 3294 3295 return 0; 3296 } 3297 3298 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) 3299 { 3300 __be16 from_proto = skb->protocol; 3301 3302 if (from_proto == htons(ETH_P_IP) && 3303 to_proto == htons(ETH_P_IPV6)) 3304 return bpf_skb_proto_4_to_6(skb); 3305 3306 if (from_proto == htons(ETH_P_IPV6) && 3307 to_proto == htons(ETH_P_IP)) 3308 return bpf_skb_proto_6_to_4(skb); 3309 3310 return -ENOTSUPP; 3311 } 3312 3313 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, 3314 u64, flags) 3315 { 3316 int ret; 3317 3318 if (unlikely(flags)) 3319 return -EINVAL; 3320 3321 /* General idea is that this helper does the basic groundwork 3322 * needed for changing the protocol, and eBPF program fills the 3323 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() 3324 * and other helpers, rather than passing a raw buffer here. 3325 * 3326 * The rationale is to keep this minimal and without a need to 3327 * deal with raw packet data. F.e. even if we would pass buffers 3328 * here, the program still needs to call the bpf_lX_csum_replace() 3329 * helpers anyway. Plus, this way we keep also separation of 3330 * concerns, since f.e. bpf_skb_store_bytes() should only take 3331 * care of stores. 3332 * 3333 * Currently, additional options and extension header space are 3334 * not supported, but flags register is reserved so we can adapt 3335 * that. For offloads, we mark packet as dodgy, so that headers 3336 * need to be verified first. 3337 */ 3338 ret = bpf_skb_proto_xlat(skb, proto); 3339 bpf_compute_data_pointers(skb); 3340 return ret; 3341 } 3342 3343 static const struct bpf_func_proto bpf_skb_change_proto_proto = { 3344 .func = bpf_skb_change_proto, 3345 .gpl_only = false, 3346 .ret_type = RET_INTEGER, 3347 .arg1_type = ARG_PTR_TO_CTX, 3348 .arg2_type = ARG_ANYTHING, 3349 .arg3_type = ARG_ANYTHING, 3350 }; 3351 3352 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) 3353 { 3354 /* We only allow a restricted subset to be changed for now. 
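	 * At the time of writing, skb_pkt_type_ok() accepts PACKET_HOST,
	 * PACKET_BROADCAST, PACKET_MULTICAST and PACKET_OTHERHOST; e.g.
	 * a program may reclassify PACKET_OTHERHOST to PACKET_HOST, but
	 * kernel-internal types such as PACKET_LOOPBACK stay off limits.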
*/ 3355 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || 3356 !skb_pkt_type_ok(pkt_type))) 3357 return -EINVAL; 3358 3359 skb->pkt_type = pkt_type; 3360 return 0; 3361 } 3362 3363 static const struct bpf_func_proto bpf_skb_change_type_proto = { 3364 .func = bpf_skb_change_type, 3365 .gpl_only = false, 3366 .ret_type = RET_INTEGER, 3367 .arg1_type = ARG_PTR_TO_CTX, 3368 .arg2_type = ARG_ANYTHING, 3369 }; 3370 3371 static u32 bpf_skb_net_base_len(const struct sk_buff *skb) 3372 { 3373 switch (skb->protocol) { 3374 case htons(ETH_P_IP): 3375 return sizeof(struct iphdr); 3376 case htons(ETH_P_IPV6): 3377 return sizeof(struct ipv6hdr); 3378 default: 3379 return ~0U; 3380 } 3381 } 3382 3383 #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \ 3384 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3385 3386 #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ 3387 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ 3388 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ 3389 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ 3390 BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \ 3391 BPF_F_ADJ_ROOM_ENCAP_L2( \ 3392 BPF_ADJ_ROOM_ENCAP_L2_MASK)) 3393 3394 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, 3395 u64 flags) 3396 { 3397 u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT; 3398 bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK; 3399 u16 mac_len = 0, inner_net = 0, inner_trans = 0; 3400 unsigned int gso_type = SKB_GSO_DODGY; 3401 int ret; 3402 3403 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3404 /* udp gso_size delineates datagrams, only allow if fixed */ 3405 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3406 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3407 return -ENOTSUPP; 3408 } 3409 3410 ret = skb_cow_head(skb, len_diff); 3411 if (unlikely(ret < 0)) 3412 return ret; 3413 3414 if (encap) { 3415 if (skb->protocol != htons(ETH_P_IP) && 3416 skb->protocol != htons(ETH_P_IPV6)) 3417 return -ENOTSUPP; 3418 3419 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 && 3420 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3421 return -EINVAL; 3422 3423 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE && 3424 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3425 return -EINVAL; 3426 3427 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH && 3428 inner_mac_len < ETH_HLEN) 3429 return -EINVAL; 3430 3431 if (skb->encapsulation) 3432 return -EALREADY; 3433 3434 mac_len = skb->network_header - skb->mac_header; 3435 inner_net = skb->network_header; 3436 if (inner_mac_len > len_diff) 3437 return -EINVAL; 3438 inner_trans = skb->transport_header; 3439 } 3440 3441 ret = bpf_skb_net_hdr_push(skb, off, len_diff); 3442 if (unlikely(ret < 0)) 3443 return ret; 3444 3445 if (encap) { 3446 skb->inner_mac_header = inner_net - inner_mac_len; 3447 skb->inner_network_header = inner_net; 3448 skb->inner_transport_header = inner_trans; 3449 3450 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH) 3451 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 3452 else 3453 skb_set_inner_protocol(skb, skb->protocol); 3454 3455 skb->encapsulation = 1; 3456 skb_set_network_header(skb, mac_len); 3457 3458 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) 3459 gso_type |= SKB_GSO_UDP_TUNNEL; 3460 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE) 3461 gso_type |= SKB_GSO_GRE; 3462 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3463 gso_type |= SKB_GSO_IPXIP6; 3464 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3465 gso_type |= SKB_GSO_IPXIP4; 3466 3467 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE || 3468 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) { 3469 int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ? 
3470 sizeof(struct ipv6hdr) : 3471 sizeof(struct iphdr); 3472 3473 skb_set_transport_header(skb, mac_len + nh_len); 3474 } 3475 3476 /* Match skb->protocol to new outer l3 protocol */ 3477 if (skb->protocol == htons(ETH_P_IP) && 3478 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) 3479 skb->protocol = htons(ETH_P_IPV6); 3480 else if (skb->protocol == htons(ETH_P_IPV6) && 3481 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) 3482 skb->protocol = htons(ETH_P_IP); 3483 } 3484 3485 if (skb_is_gso(skb)) { 3486 struct skb_shared_info *shinfo = skb_shinfo(skb); 3487 3488 /* Due to header grow, MSS needs to be downgraded. */ 3489 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3490 skb_decrease_gso_size(shinfo, len_diff); 3491 3492 /* Header must be checked, and gso_segs recomputed. */ 3493 shinfo->gso_type |= gso_type; 3494 shinfo->gso_segs = 0; 3495 } 3496 3497 return 0; 3498 } 3499 3500 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, 3501 u64 flags) 3502 { 3503 int ret; 3504 3505 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO | 3506 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3507 return -EINVAL; 3508 3509 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { 3510 /* udp gso_size delineates datagrams, only allow if fixed */ 3511 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || 3512 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3513 return -ENOTSUPP; 3514 } 3515 3516 ret = skb_unclone(skb, GFP_ATOMIC); 3517 if (unlikely(ret < 0)) 3518 return ret; 3519 3520 ret = bpf_skb_net_hdr_pop(skb, off, len_diff); 3521 if (unlikely(ret < 0)) 3522 return ret; 3523 3524 if (skb_is_gso(skb)) { 3525 struct skb_shared_info *shinfo = skb_shinfo(skb); 3526 3527 /* Due to header shrink, MSS can be upgraded. */ 3528 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) 3529 skb_increase_gso_size(shinfo, len_diff); 3530 3531 /* Header must be checked, and gso_segs recomputed. 
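		 * Clearing gso_segs forces the stack to recompute the
		 * segment count from the (possibly changed) gso_size at
		 * segmentation time, and SKB_GSO_DODGY makes it validate
		 * the headers we may have mangled here.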
*/ 3532 shinfo->gso_type |= SKB_GSO_DODGY; 3533 shinfo->gso_segs = 0; 3534 } 3535 3536 return 0; 3537 } 3538 3539 #define BPF_SKB_MAX_LEN SKB_MAX_ALLOC 3540 3541 BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3542 u32, mode, u64, flags) 3543 { 3544 u32 len_diff_abs = abs(len_diff); 3545 bool shrink = len_diff < 0; 3546 int ret = 0; 3547 3548 if (unlikely(flags || mode)) 3549 return -EINVAL; 3550 if (unlikely(len_diff_abs > 0xfffU)) 3551 return -EFAULT; 3552 3553 if (!shrink) { 3554 ret = skb_cow(skb, len_diff); 3555 if (unlikely(ret < 0)) 3556 return ret; 3557 __skb_push(skb, len_diff_abs); 3558 memset(skb->data, 0, len_diff_abs); 3559 } else { 3560 if (unlikely(!pskb_may_pull(skb, len_diff_abs))) 3561 return -ENOMEM; 3562 __skb_pull(skb, len_diff_abs); 3563 } 3564 if (tls_sw_has_ctx_rx(skb->sk)) { 3565 struct strp_msg *rxm = strp_msg(skb); 3566 3567 rxm->full_len += len_diff; 3568 } 3569 return ret; 3570 } 3571 3572 static const struct bpf_func_proto sk_skb_adjust_room_proto = { 3573 .func = sk_skb_adjust_room, 3574 .gpl_only = false, 3575 .ret_type = RET_INTEGER, 3576 .arg1_type = ARG_PTR_TO_CTX, 3577 .arg2_type = ARG_ANYTHING, 3578 .arg3_type = ARG_ANYTHING, 3579 .arg4_type = ARG_ANYTHING, 3580 }; 3581 3582 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, 3583 u32, mode, u64, flags) 3584 { 3585 u32 len_cur, len_diff_abs = abs(len_diff); 3586 u32 len_min = bpf_skb_net_base_len(skb); 3587 u32 len_max = BPF_SKB_MAX_LEN; 3588 __be16 proto = skb->protocol; 3589 bool shrink = len_diff < 0; 3590 u32 off; 3591 int ret; 3592 3593 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK | 3594 BPF_F_ADJ_ROOM_NO_CSUM_RESET))) 3595 return -EINVAL; 3596 if (unlikely(len_diff_abs > 0xfffU)) 3597 return -EFAULT; 3598 if (unlikely(proto != htons(ETH_P_IP) && 3599 proto != htons(ETH_P_IPV6))) 3600 return -ENOTSUPP; 3601 3602 off = skb_mac_header_len(skb); 3603 switch (mode) { 3604 case BPF_ADJ_ROOM_NET: 3605 off += bpf_skb_net_base_len(skb); 3606 break; 3607 case BPF_ADJ_ROOM_MAC: 3608 break; 3609 default: 3610 return -ENOTSUPP; 3611 } 3612 3613 len_cur = skb->len - skb_network_offset(skb); 3614 if ((shrink && (len_diff_abs >= len_cur || 3615 len_cur - len_diff_abs < len_min)) || 3616 (!shrink && (skb->len + len_diff_abs > len_max && 3617 !skb_is_gso(skb)))) 3618 return -ENOTSUPP; 3619 3620 ret = shrink ? 
bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
		      bpf_skb_net_grow(skb, off, len_diff_abs, flags);
	if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))
		__skb_reset_checksum_unnecessary(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func		= bpf_skb_adjust_room,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
	u32 min_len = skb_network_offset(skb);

	if (skb_transport_header_was_set(skb))
		min_len = skb_transport_offset(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		min_len = skb_checksum_start_offset(skb) +
			  skb->csum_offset + sizeof(__sum16);
	return min_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = __skb_grow_rcsum(skb, new_len);
	if (!ret)
		memset(skb->data + old_len, 0, new_len - old_len);
	return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	return __skb_trim_rcsum(skb, new_len);
}

static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
					u64 flags)
{
	u32 max_len = BPF_SKB_MAX_LEN;
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and the eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns, as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and dropping offloads from the skb by this.
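	 *
	 * A minimal usage sketch (illustrative only, not part of this
	 * file): a tc program answering a request in place might trim
	 * the reply before rewriting its headers:
	 *
	 *	if (bpf_skb_change_tail(skb, 64, 0))
	 *		return TC_ACT_SHOT;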
3694 */ 3695 ret = __bpf_try_make_writable(skb, skb->len); 3696 if (!ret) { 3697 if (new_len > skb->len) 3698 ret = bpf_skb_grow_rcsum(skb, new_len); 3699 else if (new_len < skb->len) 3700 ret = bpf_skb_trim_rcsum(skb, new_len); 3701 if (!ret && skb_is_gso(skb)) 3702 skb_gso_reset(skb); 3703 } 3704 return ret; 3705 } 3706 3707 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3708 u64, flags) 3709 { 3710 int ret = __bpf_skb_change_tail(skb, new_len, flags); 3711 3712 bpf_compute_data_pointers(skb); 3713 return ret; 3714 } 3715 3716 static const struct bpf_func_proto bpf_skb_change_tail_proto = { 3717 .func = bpf_skb_change_tail, 3718 .gpl_only = false, 3719 .ret_type = RET_INTEGER, 3720 .arg1_type = ARG_PTR_TO_CTX, 3721 .arg2_type = ARG_ANYTHING, 3722 .arg3_type = ARG_ANYTHING, 3723 }; 3724 3725 BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, 3726 u64, flags) 3727 { 3728 return __bpf_skb_change_tail(skb, new_len, flags); 3729 } 3730 3731 static const struct bpf_func_proto sk_skb_change_tail_proto = { 3732 .func = sk_skb_change_tail, 3733 .gpl_only = false, 3734 .ret_type = RET_INTEGER, 3735 .arg1_type = ARG_PTR_TO_CTX, 3736 .arg2_type = ARG_ANYTHING, 3737 .arg3_type = ARG_ANYTHING, 3738 }; 3739 3740 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, 3741 u64 flags) 3742 { 3743 u32 max_len = BPF_SKB_MAX_LEN; 3744 u32 new_len = skb->len + head_room; 3745 int ret; 3746 3747 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || 3748 new_len < skb->len)) 3749 return -EINVAL; 3750 3751 ret = skb_cow(skb, head_room); 3752 if (likely(!ret)) { 3753 /* Idea for this helper is that we currently only 3754 * allow to expand on mac header. This means that 3755 * skb->protocol network header, etc, stay as is. 3756 * Compared to bpf_skb_change_tail(), we're more 3757 * flexible due to not needing to linearize or 3758 * reset GSO. Intention for this helper is to be 3759 * used by an L3 skb that needs to push mac header 3760 * for redirection into L2 device. 
3761 */ 3762 __skb_push(skb, head_room); 3763 memset(skb->data, 0, head_room); 3764 skb_reset_mac_header(skb); 3765 skb_reset_mac_len(skb); 3766 } 3767 3768 return ret; 3769 } 3770 3771 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, 3772 u64, flags) 3773 { 3774 int ret = __bpf_skb_change_head(skb, head_room, flags); 3775 3776 bpf_compute_data_pointers(skb); 3777 return ret; 3778 } 3779 3780 static const struct bpf_func_proto bpf_skb_change_head_proto = { 3781 .func = bpf_skb_change_head, 3782 .gpl_only = false, 3783 .ret_type = RET_INTEGER, 3784 .arg1_type = ARG_PTR_TO_CTX, 3785 .arg2_type = ARG_ANYTHING, 3786 .arg3_type = ARG_ANYTHING, 3787 }; 3788 3789 BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, 3790 u64, flags) 3791 { 3792 return __bpf_skb_change_head(skb, head_room, flags); 3793 } 3794 3795 static const struct bpf_func_proto sk_skb_change_head_proto = { 3796 .func = sk_skb_change_head, 3797 .gpl_only = false, 3798 .ret_type = RET_INTEGER, 3799 .arg1_type = ARG_PTR_TO_CTX, 3800 .arg2_type = ARG_ANYTHING, 3801 .arg3_type = ARG_ANYTHING, 3802 }; 3803 3804 BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) 3805 { 3806 return xdp_get_buff_len(xdp); 3807 } 3808 3809 static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = { 3810 .func = bpf_xdp_get_buff_len, 3811 .gpl_only = false, 3812 .ret_type = RET_INTEGER, 3813 .arg1_type = ARG_PTR_TO_CTX, 3814 }; 3815 3816 BTF_ID_LIST_SINGLE(bpf_xdp_get_buff_len_bpf_ids, struct, xdp_buff) 3817 3818 const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto = { 3819 .func = bpf_xdp_get_buff_len, 3820 .gpl_only = false, 3821 .arg1_type = ARG_PTR_TO_BTF_ID, 3822 .arg1_btf_id = &bpf_xdp_get_buff_len_bpf_ids[0], 3823 }; 3824 3825 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) 3826 { 3827 return xdp_data_meta_unsupported(xdp) ? 0 : 3828 xdp->data - xdp->data_meta; 3829 } 3830 3831 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) 3832 { 3833 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 3834 unsigned long metalen = xdp_get_metalen(xdp); 3835 void *data_start = xdp_frame_end + metalen; 3836 void *data = xdp->data + offset; 3837 3838 if (unlikely(data < data_start || 3839 data > xdp->data_end - ETH_HLEN)) 3840 return -EINVAL; 3841 3842 if (metalen) 3843 memmove(xdp->data_meta + offset, 3844 xdp->data_meta, metalen); 3845 xdp->data_meta += offset; 3846 xdp->data = data; 3847 3848 return 0; 3849 } 3850 3851 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { 3852 .func = bpf_xdp_adjust_head, 3853 .gpl_only = false, 3854 .ret_type = RET_INTEGER, 3855 .arg1_type = ARG_PTR_TO_CTX, 3856 .arg2_type = ARG_ANYTHING, 3857 }; 3858 3859 static void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, 3860 void *buf, unsigned long len, bool flush) 3861 { 3862 unsigned long ptr_len, ptr_off = 0; 3863 skb_frag_t *next_frag, *end_frag; 3864 struct skb_shared_info *sinfo; 3865 void *src, *dst; 3866 u8 *ptr_buf; 3867 3868 if (likely(xdp->data_end - xdp->data >= off + len)) { 3869 src = flush ? buf : xdp->data + off; 3870 dst = flush ? 
xdp->data + off : buf; 3871 memcpy(dst, src, len); 3872 return; 3873 } 3874 3875 sinfo = xdp_get_shared_info_from_buff(xdp); 3876 end_frag = &sinfo->frags[sinfo->nr_frags]; 3877 next_frag = &sinfo->frags[0]; 3878 3879 ptr_len = xdp->data_end - xdp->data; 3880 ptr_buf = xdp->data; 3881 3882 while (true) { 3883 if (off < ptr_off + ptr_len) { 3884 unsigned long copy_off = off - ptr_off; 3885 unsigned long copy_len = min(len, ptr_len - copy_off); 3886 3887 src = flush ? buf : ptr_buf + copy_off; 3888 dst = flush ? ptr_buf + copy_off : buf; 3889 memcpy(dst, src, copy_len); 3890 3891 off += copy_len; 3892 len -= copy_len; 3893 buf += copy_len; 3894 } 3895 3896 if (!len || next_frag == end_frag) 3897 break; 3898 3899 ptr_off += ptr_len; 3900 ptr_buf = skb_frag_address(next_frag); 3901 ptr_len = skb_frag_size(next_frag); 3902 next_frag++; 3903 } 3904 } 3905 3906 static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) 3907 { 3908 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 3909 u32 size = xdp->data_end - xdp->data; 3910 void *addr = xdp->data; 3911 int i; 3912 3913 if (unlikely(offset > 0xffff || len > 0xffff)) 3914 return ERR_PTR(-EFAULT); 3915 3916 if (offset + len > xdp_get_buff_len(xdp)) 3917 return ERR_PTR(-EINVAL); 3918 3919 if (offset < size) /* linear area */ 3920 goto out; 3921 3922 offset -= size; 3923 for (i = 0; i < sinfo->nr_frags; i++) { /* paged area */ 3924 u32 frag_size = skb_frag_size(&sinfo->frags[i]); 3925 3926 if (offset < frag_size) { 3927 addr = skb_frag_address(&sinfo->frags[i]); 3928 size = frag_size; 3929 break; 3930 } 3931 offset -= frag_size; 3932 } 3933 out: 3934 return offset + len <= size ? addr + offset : NULL; 3935 } 3936 3937 BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset, 3938 void *, buf, u32, len) 3939 { 3940 void *ptr; 3941 3942 ptr = bpf_xdp_pointer(xdp, offset, len); 3943 if (IS_ERR(ptr)) 3944 return PTR_ERR(ptr); 3945 3946 if (!ptr) 3947 bpf_xdp_copy_buf(xdp, offset, buf, len, false); 3948 else 3949 memcpy(buf, ptr, len); 3950 3951 return 0; 3952 } 3953 3954 static const struct bpf_func_proto bpf_xdp_load_bytes_proto = { 3955 .func = bpf_xdp_load_bytes, 3956 .gpl_only = false, 3957 .ret_type = RET_INTEGER, 3958 .arg1_type = ARG_PTR_TO_CTX, 3959 .arg2_type = ARG_ANYTHING, 3960 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 3961 .arg4_type = ARG_CONST_SIZE, 3962 }; 3963 3964 BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset, 3965 void *, buf, u32, len) 3966 { 3967 void *ptr; 3968 3969 ptr = bpf_xdp_pointer(xdp, offset, len); 3970 if (IS_ERR(ptr)) 3971 return PTR_ERR(ptr); 3972 3973 if (!ptr) 3974 bpf_xdp_copy_buf(xdp, offset, buf, len, true); 3975 else 3976 memcpy(ptr, buf, len); 3977 3978 return 0; 3979 } 3980 3981 static const struct bpf_func_proto bpf_xdp_store_bytes_proto = { 3982 .func = bpf_xdp_store_bytes, 3983 .gpl_only = false, 3984 .ret_type = RET_INTEGER, 3985 .arg1_type = ARG_PTR_TO_CTX, 3986 .arg2_type = ARG_ANYTHING, 3987 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 3988 .arg4_type = ARG_CONST_SIZE, 3989 }; 3990 3991 static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset) 3992 { 3993 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 3994 skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1]; 3995 struct xdp_rxq_info *rxq = xdp->rxq; 3996 unsigned int tailroom; 3997 3998 if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz) 3999 return -EOPNOTSUPP; 4000 4001 tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag); 4002 if (unlikely(offset > 
tailroom)) 4003 return -EINVAL; 4004 4005 memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset); 4006 skb_frag_size_add(frag, offset); 4007 sinfo->xdp_frags_size += offset; 4008 4009 return 0; 4010 } 4011 4012 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) 4013 { 4014 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 4015 int i, n_frags_free = 0, len_free = 0; 4016 4017 if (unlikely(offset > (int)xdp_get_buff_len(xdp) - ETH_HLEN)) 4018 return -EINVAL; 4019 4020 for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) { 4021 skb_frag_t *frag = &sinfo->frags[i]; 4022 int shrink = min_t(int, offset, skb_frag_size(frag)); 4023 4024 len_free += shrink; 4025 offset -= shrink; 4026 4027 if (skb_frag_size(frag) == shrink) { 4028 struct page *page = skb_frag_page(frag); 4029 4030 __xdp_return(page_address(page), &xdp->rxq->mem, 4031 false, NULL); 4032 n_frags_free++; 4033 } else { 4034 skb_frag_size_sub(frag, shrink); 4035 break; 4036 } 4037 } 4038 sinfo->nr_frags -= n_frags_free; 4039 sinfo->xdp_frags_size -= len_free; 4040 4041 if (unlikely(!sinfo->nr_frags)) { 4042 xdp_buff_clear_frags_flag(xdp); 4043 xdp->data_end -= offset; 4044 } 4045 4046 return 0; 4047 } 4048 4049 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) 4050 { 4051 void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */ 4052 void *data_end = xdp->data_end + offset; 4053 4054 if (unlikely(xdp_buff_has_frags(xdp))) { /* non-linear xdp buff */ 4055 if (offset < 0) 4056 return bpf_xdp_frags_shrink_tail(xdp, -offset); 4057 4058 return bpf_xdp_frags_increase_tail(xdp, offset); 4059 } 4060 4061 /* Notice that xdp_data_hard_end have reserved some tailroom */ 4062 if (unlikely(data_end > data_hard_end)) 4063 return -EINVAL; 4064 4065 /* ALL drivers MUST init xdp->frame_sz, chicken check below */ 4066 if (unlikely(xdp->frame_sz > PAGE_SIZE)) { 4067 WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz); 4068 return -EINVAL; 4069 } 4070 4071 if (unlikely(data_end < xdp->data + ETH_HLEN)) 4072 return -EINVAL; 4073 4074 /* Clear memory area on grow, can contain uninit kernel memory */ 4075 if (offset > 0) 4076 memset(xdp->data_end, 0, offset); 4077 4078 xdp->data_end = data_end; 4079 4080 return 0; 4081 } 4082 4083 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = { 4084 .func = bpf_xdp_adjust_tail, 4085 .gpl_only = false, 4086 .ret_type = RET_INTEGER, 4087 .arg1_type = ARG_PTR_TO_CTX, 4088 .arg2_type = ARG_ANYTHING, 4089 }; 4090 4091 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) 4092 { 4093 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); 4094 void *meta = xdp->data_meta + offset; 4095 unsigned long metalen = xdp->data - meta; 4096 4097 if (xdp_data_meta_unsupported(xdp)) 4098 return -ENOTSUPP; 4099 if (unlikely(meta < xdp_frame_end || 4100 meta > xdp->data)) 4101 return -EINVAL; 4102 if (unlikely(xdp_metalen_invalid(metalen))) 4103 return -EACCES; 4104 4105 xdp->data_meta = meta; 4106 4107 return 0; 4108 } 4109 4110 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { 4111 .func = bpf_xdp_adjust_meta, 4112 .gpl_only = false, 4113 .ret_type = RET_INTEGER, 4114 .arg1_type = ARG_PTR_TO_CTX, 4115 .arg2_type = ARG_ANYTHING, 4116 }; 4117 4118 /* XDP_REDIRECT works by a three-step process, implemented in the functions 4119 * below: 4120 * 4121 * 1. 
The bpf_redirect() and bpf_redirect_map() helpers will look up the target
4122 * of the redirect and store it (along with some other metadata) in a per-CPU
4123 * struct bpf_redirect_info.
4124 *
4125 * 2. When the program returns the XDP_REDIRECT return code, the driver will
4126 * call xdp_do_redirect() which will use the information in struct
4127 * bpf_redirect_info to actually enqueue the frame into a map type-specific
4128 * bulk queue structure.
4129 *
4130 * 3. Before exiting its NAPI poll loop, the driver will call xdp_do_flush(),
4131 * which will flush all the different bulk queues, thus completing the
4132 * redirect.
4133 *
4134 * Pointers to the map entries will be kept around for this whole sequence of
4135 * steps, protected by RCU. However, there is no top-level rcu_read_lock() in
4136 * the core code; instead, the RCU protection relies on everything happening
4137 * inside a single NAPI poll sequence, which means it's between a pair of calls
4138 * to local_bh_disable()/local_bh_enable().
4139 *
4140 * The map entries are marked as __rcu and the map code makes sure to
4141 * dereference those pointers with rcu_dereference_check() in a way that works
4142 * both for sections that hold an rcu_read_lock() and for sections that are
4143 * called from NAPI without a separate rcu_read_lock(). The code below does not
4144 * use RCU annotations, but relies on those in the map code.
4145 */
4146 void xdp_do_flush(void)
4147 {
4148 __dev_flush();
4149 __cpu_map_flush();
4150 __xsk_map_flush();
4151 }
4152 EXPORT_SYMBOL_GPL(xdp_do_flush);
4153
4154 void bpf_clear_redirect_map(struct bpf_map *map)
4155 {
4156 struct bpf_redirect_info *ri;
4157 int cpu;
4158
4159 for_each_possible_cpu(cpu) {
4160 ri = per_cpu_ptr(&bpf_redirect_info, cpu);
4161 /* Avoid polluting remote cacheline due to writes if
4162 * not needed. Once we pass this test, we need the
4163 * cmpxchg() to make sure it hasn't been changed in
4164 * the meantime by a remote CPU.
4165 */
4166 if (unlikely(READ_ONCE(ri->map) == map))
4167 cmpxchg(&ri->map, map, NULL);
4168 }
4169 }
4170
4171 DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
4172 EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
4173
4174 u32 xdp_master_redirect(struct xdp_buff *xdp)
4175 {
4176 struct net_device *master, *slave;
4177 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
4178
4179 master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
4180 slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
4181 if (slave && slave != xdp->rxq->dev) {
4182 /* The target device is different from the receiving device, so
4183 * redirect it to the new device.
4184 * Using XDP_REDIRECT gets the correct behaviour from XDP-enabled
4185 * drivers to unmap the packet from their rx ring.
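* (The master device here is e.g. a bonding master; its
* ndo_xdp_get_xmit_slave() callback, invoked above, selects the
* egress slave for this buffer.)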
4186 */ 4187 ri->tgt_index = slave->ifindex; 4188 ri->map_id = INT_MAX; 4189 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4190 return XDP_REDIRECT; 4191 } 4192 return XDP_TX; 4193 } 4194 EXPORT_SYMBOL_GPL(xdp_master_redirect); 4195 4196 static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri, 4197 struct net_device *dev, 4198 struct xdp_buff *xdp, 4199 struct bpf_prog *xdp_prog) 4200 { 4201 enum bpf_map_type map_type = ri->map_type; 4202 void *fwd = ri->tgt_value; 4203 u32 map_id = ri->map_id; 4204 int err; 4205 4206 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4207 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4208 4209 err = __xsk_map_redirect(fwd, xdp); 4210 if (unlikely(err)) 4211 goto err; 4212 4213 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4214 return 0; 4215 err: 4216 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4217 return err; 4218 } 4219 4220 static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri, 4221 struct net_device *dev, 4222 struct xdp_frame *xdpf, 4223 struct bpf_prog *xdp_prog) 4224 { 4225 enum bpf_map_type map_type = ri->map_type; 4226 void *fwd = ri->tgt_value; 4227 u32 map_id = ri->map_id; 4228 struct bpf_map *map; 4229 int err; 4230 4231 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4232 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4233 4234 if (unlikely(!xdpf)) { 4235 err = -EOVERFLOW; 4236 goto err; 4237 } 4238 4239 switch (map_type) { 4240 case BPF_MAP_TYPE_DEVMAP: 4241 fallthrough; 4242 case BPF_MAP_TYPE_DEVMAP_HASH: 4243 map = READ_ONCE(ri->map); 4244 if (unlikely(map)) { 4245 WRITE_ONCE(ri->map, NULL); 4246 err = dev_map_enqueue_multi(xdpf, dev, map, 4247 ri->flags & BPF_F_EXCLUDE_INGRESS); 4248 } else { 4249 err = dev_map_enqueue(fwd, xdpf, dev); 4250 } 4251 break; 4252 case BPF_MAP_TYPE_CPUMAP: 4253 err = cpu_map_enqueue(fwd, xdpf, dev); 4254 break; 4255 case BPF_MAP_TYPE_UNSPEC: 4256 if (map_id == INT_MAX) { 4257 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); 4258 if (unlikely(!fwd)) { 4259 err = -EINVAL; 4260 break; 4261 } 4262 err = dev_xdp_enqueue(fwd, xdpf, dev); 4263 break; 4264 } 4265 fallthrough; 4266 default: 4267 err = -EBADRQC; 4268 } 4269 4270 if (unlikely(err)) 4271 goto err; 4272 4273 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4274 return 0; 4275 err: 4276 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4277 return err; 4278 } 4279 4280 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, 4281 struct bpf_prog *xdp_prog) 4282 { 4283 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4284 enum bpf_map_type map_type = ri->map_type; 4285 4286 /* XDP_REDIRECT is not fully supported yet for xdp frags since 4287 * not all XDP capable drivers can map non-linear xdp_frame in 4288 * ndo_xdp_xmit. 
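* Until that is resolved, buffs carrying frags are only let through
* when the target is a CPUMAP; any other target is rejected with
* -EOPNOTSUPP by the check below.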
4289 */ 4290 if (unlikely(xdp_buff_has_frags(xdp) && 4291 map_type != BPF_MAP_TYPE_CPUMAP)) 4292 return -EOPNOTSUPP; 4293 4294 if (map_type == BPF_MAP_TYPE_XSKMAP) 4295 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); 4296 4297 return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp), 4298 xdp_prog); 4299 } 4300 EXPORT_SYMBOL_GPL(xdp_do_redirect); 4301 4302 int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp, 4303 struct xdp_frame *xdpf, struct bpf_prog *xdp_prog) 4304 { 4305 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4306 enum bpf_map_type map_type = ri->map_type; 4307 4308 if (map_type == BPF_MAP_TYPE_XSKMAP) 4309 return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); 4310 4311 return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog); 4312 } 4313 EXPORT_SYMBOL_GPL(xdp_do_redirect_frame); 4314 4315 static int xdp_do_generic_redirect_map(struct net_device *dev, 4316 struct sk_buff *skb, 4317 struct xdp_buff *xdp, 4318 struct bpf_prog *xdp_prog, 4319 void *fwd, 4320 enum bpf_map_type map_type, u32 map_id) 4321 { 4322 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4323 struct bpf_map *map; 4324 int err; 4325 4326 switch (map_type) { 4327 case BPF_MAP_TYPE_DEVMAP: 4328 fallthrough; 4329 case BPF_MAP_TYPE_DEVMAP_HASH: 4330 map = READ_ONCE(ri->map); 4331 if (unlikely(map)) { 4332 WRITE_ONCE(ri->map, NULL); 4333 err = dev_map_redirect_multi(dev, skb, xdp_prog, map, 4334 ri->flags & BPF_F_EXCLUDE_INGRESS); 4335 } else { 4336 err = dev_map_generic_redirect(fwd, skb, xdp_prog); 4337 } 4338 if (unlikely(err)) 4339 goto err; 4340 break; 4341 case BPF_MAP_TYPE_XSKMAP: 4342 err = xsk_generic_rcv(fwd, xdp); 4343 if (err) 4344 goto err; 4345 consume_skb(skb); 4346 break; 4347 case BPF_MAP_TYPE_CPUMAP: 4348 err = cpu_map_generic_redirect(fwd, skb); 4349 if (unlikely(err)) 4350 goto err; 4351 break; 4352 default: 4353 err = -EBADRQC; 4354 goto err; 4355 } 4356 4357 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index); 4358 return 0; 4359 err: 4360 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err); 4361 return err; 4362 } 4363 4364 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, 4365 struct xdp_buff *xdp, struct bpf_prog *xdp_prog) 4366 { 4367 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4368 enum bpf_map_type map_type = ri->map_type; 4369 void *fwd = ri->tgt_value; 4370 u32 map_id = ri->map_id; 4371 int err; 4372 4373 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ 4374 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4375 4376 if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { 4377 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index); 4378 if (unlikely(!fwd)) { 4379 err = -EINVAL; 4380 goto err; 4381 } 4382 4383 err = xdp_ok_fwd_dev(fwd, skb->len); 4384 if (unlikely(err)) 4385 goto err; 4386 4387 skb->dev = fwd; 4388 _trace_xdp_redirect(dev, xdp_prog, ri->tgt_index); 4389 generic_xdp_tx(skb, xdp_prog); 4390 return 0; 4391 } 4392 4393 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id); 4394 err: 4395 _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err); 4396 return err; 4397 } 4398 4399 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) 4400 { 4401 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); 4402 4403 if (unlikely(flags)) 4404 return XDP_ABORTED; 4405 4406 /* NB! 
Map type UNSPEC and map_id == INT_MAX (never generated 4407 * by map_idr) is used for ifindex based XDP redirect. 4408 */ 4409 ri->tgt_index = ifindex; 4410 ri->map_id = INT_MAX; 4411 ri->map_type = BPF_MAP_TYPE_UNSPEC; 4412 4413 return XDP_REDIRECT; 4414 } 4415 4416 static const struct bpf_func_proto bpf_xdp_redirect_proto = { 4417 .func = bpf_xdp_redirect, 4418 .gpl_only = false, 4419 .ret_type = RET_INTEGER, 4420 .arg1_type = ARG_ANYTHING, 4421 .arg2_type = ARG_ANYTHING, 4422 }; 4423 4424 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, 4425 u64, flags) 4426 { 4427 return map->ops->map_redirect(map, ifindex, flags); 4428 } 4429 4430 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { 4431 .func = bpf_xdp_redirect_map, 4432 .gpl_only = false, 4433 .ret_type = RET_INTEGER, 4434 .arg1_type = ARG_CONST_MAP_PTR, 4435 .arg2_type = ARG_ANYTHING, 4436 .arg3_type = ARG_ANYTHING, 4437 }; 4438 4439 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, 4440 unsigned long off, unsigned long len) 4441 { 4442 void *ptr = skb_header_pointer(skb, off, len, dst_buff); 4443 4444 if (unlikely(!ptr)) 4445 return len; 4446 if (ptr != dst_buff) 4447 memcpy(dst_buff, ptr, len); 4448 4449 return 0; 4450 } 4451 4452 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, 4453 u64, flags, void *, meta, u64, meta_size) 4454 { 4455 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4456 4457 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4458 return -EINVAL; 4459 if (unlikely(!skb || skb_size > skb->len)) 4460 return -EFAULT; 4461 4462 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, 4463 bpf_skb_copy); 4464 } 4465 4466 static const struct bpf_func_proto bpf_skb_event_output_proto = { 4467 .func = bpf_skb_event_output, 4468 .gpl_only = true, 4469 .ret_type = RET_INTEGER, 4470 .arg1_type = ARG_PTR_TO_CTX, 4471 .arg2_type = ARG_CONST_MAP_PTR, 4472 .arg3_type = ARG_ANYTHING, 4473 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4474 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4475 }; 4476 4477 BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff) 4478 4479 const struct bpf_func_proto bpf_skb_output_proto = { 4480 .func = bpf_skb_event_output, 4481 .gpl_only = true, 4482 .ret_type = RET_INTEGER, 4483 .arg1_type = ARG_PTR_TO_BTF_ID, 4484 .arg1_btf_id = &bpf_skb_output_btf_ids[0], 4485 .arg2_type = ARG_CONST_MAP_PTR, 4486 .arg3_type = ARG_ANYTHING, 4487 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4488 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4489 }; 4490 4491 static unsigned short bpf_tunnel_key_af(u64 flags) 4492 { 4493 return flags & BPF_F_TUNINFO_IPV6 ? 
AF_INET6 : AF_INET; 4494 } 4495 4496 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, 4497 u32, size, u64, flags) 4498 { 4499 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4500 u8 compat[sizeof(struct bpf_tunnel_key)]; 4501 void *to_orig = to; 4502 int err; 4503 4504 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { 4505 err = -EINVAL; 4506 goto err_clear; 4507 } 4508 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { 4509 err = -EPROTO; 4510 goto err_clear; 4511 } 4512 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 4513 err = -EINVAL; 4514 switch (size) { 4515 case offsetof(struct bpf_tunnel_key, local_ipv6[0]): 4516 case offsetof(struct bpf_tunnel_key, tunnel_label): 4517 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4518 goto set_compat; 4519 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4520 /* Fixup deprecated structure layouts here, so we have 4521 * a common path later on. 4522 */ 4523 if (ip_tunnel_info_af(info) != AF_INET) 4524 goto err_clear; 4525 set_compat: 4526 to = (struct bpf_tunnel_key *)compat; 4527 break; 4528 default: 4529 goto err_clear; 4530 } 4531 } 4532 4533 to->tunnel_id = be64_to_cpu(info->key.tun_id); 4534 to->tunnel_tos = info->key.tos; 4535 to->tunnel_ttl = info->key.ttl; 4536 to->tunnel_ext = 0; 4537 4538 if (flags & BPF_F_TUNINFO_IPV6) { 4539 memcpy(to->remote_ipv6, &info->key.u.ipv6.src, 4540 sizeof(to->remote_ipv6)); 4541 memcpy(to->local_ipv6, &info->key.u.ipv6.dst, 4542 sizeof(to->local_ipv6)); 4543 to->tunnel_label = be32_to_cpu(info->key.label); 4544 } else { 4545 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); 4546 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 4547 to->local_ipv4 = be32_to_cpu(info->key.u.ipv4.dst); 4548 memset(&to->local_ipv6[1], 0, sizeof(__u32) * 3); 4549 to->tunnel_label = 0; 4550 } 4551 4552 if (unlikely(size != sizeof(struct bpf_tunnel_key))) 4553 memcpy(to_orig, to, size); 4554 4555 return 0; 4556 err_clear: 4557 memset(to_orig, 0, size); 4558 return err; 4559 } 4560 4561 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { 4562 .func = bpf_skb_get_tunnel_key, 4563 .gpl_only = false, 4564 .ret_type = RET_INTEGER, 4565 .arg1_type = ARG_PTR_TO_CTX, 4566 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4567 .arg3_type = ARG_CONST_SIZE, 4568 .arg4_type = ARG_ANYTHING, 4569 }; 4570 4571 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) 4572 { 4573 const struct ip_tunnel_info *info = skb_tunnel_info(skb); 4574 int err; 4575 4576 if (unlikely(!info || 4577 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { 4578 err = -ENOENT; 4579 goto err_clear; 4580 } 4581 if (unlikely(size < info->options_len)) { 4582 err = -ENOMEM; 4583 goto err_clear; 4584 } 4585 4586 ip_tunnel_info_opts_get(to, info); 4587 if (size > info->options_len) 4588 memset(to + info->options_len, 0, size - info->options_len); 4589 4590 return info->options_len; 4591 err_clear: 4592 memset(to, 0, size); 4593 return err; 4594 } 4595 4596 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { 4597 .func = bpf_skb_get_tunnel_opt, 4598 .gpl_only = false, 4599 .ret_type = RET_INTEGER, 4600 .arg1_type = ARG_PTR_TO_CTX, 4601 .arg2_type = ARG_PTR_TO_UNINIT_MEM, 4602 .arg3_type = ARG_CONST_SIZE, 4603 }; 4604 4605 static struct metadata_dst __percpu *md_dst; 4606 4607 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, 4608 const struct bpf_tunnel_key *, from, u32, size, u64, flags) 4609 { 4610 struct metadata_dst *md = 
this_cpu_ptr(md_dst); 4611 u8 compat[sizeof(struct bpf_tunnel_key)]; 4612 struct ip_tunnel_info *info; 4613 4614 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | 4615 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) 4616 return -EINVAL; 4617 if (unlikely(size != sizeof(struct bpf_tunnel_key))) { 4618 switch (size) { 4619 case offsetof(struct bpf_tunnel_key, local_ipv6[0]): 4620 case offsetof(struct bpf_tunnel_key, tunnel_label): 4621 case offsetof(struct bpf_tunnel_key, tunnel_ext): 4622 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): 4623 /* Fixup deprecated structure layouts here, so we have 4624 * a common path later on. 4625 */ 4626 memcpy(compat, from, size); 4627 memset(compat + size, 0, sizeof(compat) - size); 4628 from = (const struct bpf_tunnel_key *) compat; 4629 break; 4630 default: 4631 return -EINVAL; 4632 } 4633 } 4634 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || 4635 from->tunnel_ext)) 4636 return -EINVAL; 4637 4638 skb_dst_drop(skb); 4639 dst_hold((struct dst_entry *) md); 4640 skb_dst_set(skb, (struct dst_entry *) md); 4641 4642 info = &md->u.tun_info; 4643 memset(info, 0, sizeof(*info)); 4644 info->mode = IP_TUNNEL_INFO_TX; 4645 4646 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; 4647 if (flags & BPF_F_DONT_FRAGMENT) 4648 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; 4649 if (flags & BPF_F_ZERO_CSUM_TX) 4650 info->key.tun_flags &= ~TUNNEL_CSUM; 4651 if (flags & BPF_F_SEQ_NUMBER) 4652 info->key.tun_flags |= TUNNEL_SEQ; 4653 4654 info->key.tun_id = cpu_to_be64(from->tunnel_id); 4655 info->key.tos = from->tunnel_tos; 4656 info->key.ttl = from->tunnel_ttl; 4657 4658 if (flags & BPF_F_TUNINFO_IPV6) { 4659 info->mode |= IP_TUNNEL_INFO_IPV6; 4660 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, 4661 sizeof(from->remote_ipv6)); 4662 memcpy(&info->key.u.ipv6.src, from->local_ipv6, 4663 sizeof(from->local_ipv6)); 4664 info->key.label = cpu_to_be32(from->tunnel_label) & 4665 IPV6_FLOWLABEL_MASK; 4666 } else { 4667 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); 4668 info->key.u.ipv4.src = cpu_to_be32(from->local_ipv4); 4669 info->key.flow_flags = FLOWI_FLAG_ANYSRC; 4670 } 4671 4672 return 0; 4673 } 4674 4675 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { 4676 .func = bpf_skb_set_tunnel_key, 4677 .gpl_only = false, 4678 .ret_type = RET_INTEGER, 4679 .arg1_type = ARG_PTR_TO_CTX, 4680 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4681 .arg3_type = ARG_CONST_SIZE, 4682 .arg4_type = ARG_ANYTHING, 4683 }; 4684 4685 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, 4686 const u8 *, from, u32, size) 4687 { 4688 struct ip_tunnel_info *info = skb_tunnel_info(skb); 4689 const struct metadata_dst *md = this_cpu_ptr(md_dst); 4690 4691 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) 4692 return -EINVAL; 4693 if (unlikely(size > IP_TUNNEL_OPTS_MAX)) 4694 return -ENOMEM; 4695 4696 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); 4697 4698 return 0; 4699 } 4700 4701 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { 4702 .func = bpf_skb_set_tunnel_opt, 4703 .gpl_only = false, 4704 .ret_type = RET_INTEGER, 4705 .arg1_type = ARG_PTR_TO_CTX, 4706 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4707 .arg3_type = ARG_CONST_SIZE, 4708 }; 4709 4710 static const struct bpf_func_proto * 4711 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) 4712 { 4713 if (!md_dst) { 4714 struct metadata_dst __percpu *tmp; 4715 4716 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX, 
4717 METADATA_IP_TUNNEL, 4718 GFP_KERNEL); 4719 if (!tmp) 4720 return NULL; 4721 if (cmpxchg(&md_dst, NULL, tmp)) 4722 metadata_dst_free_percpu(tmp); 4723 } 4724 4725 switch (which) { 4726 case BPF_FUNC_skb_set_tunnel_key: 4727 return &bpf_skb_set_tunnel_key_proto; 4728 case BPF_FUNC_skb_set_tunnel_opt: 4729 return &bpf_skb_set_tunnel_opt_proto; 4730 default: 4731 return NULL; 4732 } 4733 } 4734 4735 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map, 4736 u32, idx) 4737 { 4738 struct bpf_array *array = container_of(map, struct bpf_array, map); 4739 struct cgroup *cgrp; 4740 struct sock *sk; 4741 4742 sk = skb_to_full_sk(skb); 4743 if (!sk || !sk_fullsock(sk)) 4744 return -ENOENT; 4745 if (unlikely(idx >= array->map.max_entries)) 4746 return -E2BIG; 4747 4748 cgrp = READ_ONCE(array->ptrs[idx]); 4749 if (unlikely(!cgrp)) 4750 return -EAGAIN; 4751 4752 return sk_under_cgroup_hierarchy(sk, cgrp); 4753 } 4754 4755 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { 4756 .func = bpf_skb_under_cgroup, 4757 .gpl_only = false, 4758 .ret_type = RET_INTEGER, 4759 .arg1_type = ARG_PTR_TO_CTX, 4760 .arg2_type = ARG_CONST_MAP_PTR, 4761 .arg3_type = ARG_ANYTHING, 4762 }; 4763 4764 #ifdef CONFIG_SOCK_CGROUP_DATA 4765 static inline u64 __bpf_sk_cgroup_id(struct sock *sk) 4766 { 4767 struct cgroup *cgrp; 4768 4769 sk = sk_to_full_sk(sk); 4770 if (!sk || !sk_fullsock(sk)) 4771 return 0; 4772 4773 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4774 return cgroup_id(cgrp); 4775 } 4776 4777 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) 4778 { 4779 return __bpf_sk_cgroup_id(skb->sk); 4780 } 4781 4782 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = { 4783 .func = bpf_skb_cgroup_id, 4784 .gpl_only = false, 4785 .ret_type = RET_INTEGER, 4786 .arg1_type = ARG_PTR_TO_CTX, 4787 }; 4788 4789 static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk, 4790 int ancestor_level) 4791 { 4792 struct cgroup *ancestor; 4793 struct cgroup *cgrp; 4794 4795 sk = sk_to_full_sk(sk); 4796 if (!sk || !sk_fullsock(sk)) 4797 return 0; 4798 4799 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 4800 ancestor = cgroup_ancestor(cgrp, ancestor_level); 4801 if (!ancestor) 4802 return 0; 4803 4804 return cgroup_id(ancestor); 4805 } 4806 4807 BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, 4808 ancestor_level) 4809 { 4810 return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level); 4811 } 4812 4813 static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { 4814 .func = bpf_skb_ancestor_cgroup_id, 4815 .gpl_only = false, 4816 .ret_type = RET_INTEGER, 4817 .arg1_type = ARG_PTR_TO_CTX, 4818 .arg2_type = ARG_ANYTHING, 4819 }; 4820 4821 BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk) 4822 { 4823 return __bpf_sk_cgroup_id(sk); 4824 } 4825 4826 static const struct bpf_func_proto bpf_sk_cgroup_id_proto = { 4827 .func = bpf_sk_cgroup_id, 4828 .gpl_only = false, 4829 .ret_type = RET_INTEGER, 4830 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4831 }; 4832 4833 BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level) 4834 { 4835 return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level); 4836 } 4837 4838 static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = { 4839 .func = bpf_sk_ancestor_cgroup_id, 4840 .gpl_only = false, 4841 .ret_type = RET_INTEGER, 4842 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4843 .arg2_type = ARG_ANYTHING, 4844 }; 4845 #endif 4846 4847 static unsigned long bpf_xdp_copy(void *dst, const void *ctx, 4848 
unsigned long off, unsigned long len) 4849 { 4850 struct xdp_buff *xdp = (struct xdp_buff *)ctx; 4851 4852 bpf_xdp_copy_buf(xdp, off, dst, len, false); 4853 return 0; 4854 } 4855 4856 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, 4857 u64, flags, void *, meta, u64, meta_size) 4858 { 4859 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; 4860 4861 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) 4862 return -EINVAL; 4863 4864 if (unlikely(!xdp || xdp_size > xdp_get_buff_len(xdp))) 4865 return -EFAULT; 4866 4867 return bpf_event_output(map, flags, meta, meta_size, xdp, 4868 xdp_size, bpf_xdp_copy); 4869 } 4870 4871 static const struct bpf_func_proto bpf_xdp_event_output_proto = { 4872 .func = bpf_xdp_event_output, 4873 .gpl_only = true, 4874 .ret_type = RET_INTEGER, 4875 .arg1_type = ARG_PTR_TO_CTX, 4876 .arg2_type = ARG_CONST_MAP_PTR, 4877 .arg3_type = ARG_ANYTHING, 4878 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4879 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4880 }; 4881 4882 BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff) 4883 4884 const struct bpf_func_proto bpf_xdp_output_proto = { 4885 .func = bpf_xdp_event_output, 4886 .gpl_only = true, 4887 .ret_type = RET_INTEGER, 4888 .arg1_type = ARG_PTR_TO_BTF_ID, 4889 .arg1_btf_id = &bpf_xdp_output_btf_ids[0], 4890 .arg2_type = ARG_CONST_MAP_PTR, 4891 .arg3_type = ARG_ANYTHING, 4892 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 4893 .arg5_type = ARG_CONST_SIZE_OR_ZERO, 4894 }; 4895 4896 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) 4897 { 4898 return skb->sk ? __sock_gen_cookie(skb->sk) : 0; 4899 } 4900 4901 static const struct bpf_func_proto bpf_get_socket_cookie_proto = { 4902 .func = bpf_get_socket_cookie, 4903 .gpl_only = false, 4904 .ret_type = RET_INTEGER, 4905 .arg1_type = ARG_PTR_TO_CTX, 4906 }; 4907 4908 BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4909 { 4910 return __sock_gen_cookie(ctx->sk); 4911 } 4912 4913 static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { 4914 .func = bpf_get_socket_cookie_sock_addr, 4915 .gpl_only = false, 4916 .ret_type = RET_INTEGER, 4917 .arg1_type = ARG_PTR_TO_CTX, 4918 }; 4919 4920 BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx) 4921 { 4922 return __sock_gen_cookie(ctx); 4923 } 4924 4925 static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = { 4926 .func = bpf_get_socket_cookie_sock, 4927 .gpl_only = false, 4928 .ret_type = RET_INTEGER, 4929 .arg1_type = ARG_PTR_TO_CTX, 4930 }; 4931 4932 BPF_CALL_1(bpf_get_socket_ptr_cookie, struct sock *, sk) 4933 { 4934 return sk ? sock_gen_cookie(sk) : 0; 4935 } 4936 4937 const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = { 4938 .func = bpf_get_socket_ptr_cookie, 4939 .gpl_only = false, 4940 .ret_type = RET_INTEGER, 4941 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 4942 }; 4943 4944 BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) 4945 { 4946 return __sock_gen_cookie(ctx->sk); 4947 } 4948 4949 static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { 4950 .func = bpf_get_socket_cookie_sock_ops, 4951 .gpl_only = false, 4952 .ret_type = RET_INTEGER, 4953 .arg1_type = ARG_PTR_TO_CTX, 4954 }; 4955 4956 static u64 __bpf_get_netns_cookie(struct sock *sk) 4957 { 4958 const struct net *net = sk ? 
sock_net(sk) : &init_net; 4959 4960 return net->net_cookie; 4961 } 4962 4963 BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx) 4964 { 4965 return __bpf_get_netns_cookie(ctx); 4966 } 4967 4968 static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = { 4969 .func = bpf_get_netns_cookie_sock, 4970 .gpl_only = false, 4971 .ret_type = RET_INTEGER, 4972 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4973 }; 4974 4975 BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) 4976 { 4977 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 4978 } 4979 4980 static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = { 4981 .func = bpf_get_netns_cookie_sock_addr, 4982 .gpl_only = false, 4983 .ret_type = RET_INTEGER, 4984 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4985 }; 4986 4987 BPF_CALL_1(bpf_get_netns_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) 4988 { 4989 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 4990 } 4991 4992 static const struct bpf_func_proto bpf_get_netns_cookie_sock_ops_proto = { 4993 .func = bpf_get_netns_cookie_sock_ops, 4994 .gpl_only = false, 4995 .ret_type = RET_INTEGER, 4996 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 4997 }; 4998 4999 BPF_CALL_1(bpf_get_netns_cookie_sk_msg, struct sk_msg *, ctx) 5000 { 5001 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); 5002 } 5003 5004 static const struct bpf_func_proto bpf_get_netns_cookie_sk_msg_proto = { 5005 .func = bpf_get_netns_cookie_sk_msg, 5006 .gpl_only = false, 5007 .ret_type = RET_INTEGER, 5008 .arg1_type = ARG_PTR_TO_CTX_OR_NULL, 5009 }; 5010 5011 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) 5012 { 5013 struct sock *sk = sk_to_full_sk(skb->sk); 5014 kuid_t kuid; 5015 5016 if (!sk || !sk_fullsock(sk)) 5017 return overflowuid; 5018 kuid = sock_net_uid(sock_net(sk), sk); 5019 return from_kuid_munged(sock_net(sk)->user_ns, kuid); 5020 } 5021 5022 static const struct bpf_func_proto bpf_get_socket_uid_proto = { 5023 .func = bpf_get_socket_uid, 5024 .gpl_only = false, 5025 .ret_type = RET_INTEGER, 5026 .arg1_type = ARG_PTR_TO_CTX, 5027 }; 5028 5029 static int __bpf_setsockopt(struct sock *sk, int level, int optname, 5030 char *optval, int optlen) 5031 { 5032 char devname[IFNAMSIZ]; 5033 int val, valbool; 5034 struct net *net; 5035 int ifindex; 5036 int ret = 0; 5037 5038 if (!sk_fullsock(sk)) 5039 return -EINVAL; 5040 5041 if (level == SOL_SOCKET) { 5042 if (optlen != sizeof(int) && optname != SO_BINDTODEVICE) 5043 return -EINVAL; 5044 val = *((int *)optval); 5045 valbool = val ? 1 : 0; 5046 5047 /* Only some socketops are supported */ 5048 switch (optname) { 5049 case SO_RCVBUF: 5050 val = min_t(u32, val, READ_ONCE(sysctl_rmem_max)); 5051 val = min_t(int, val, INT_MAX / 2); 5052 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 5053 WRITE_ONCE(sk->sk_rcvbuf, 5054 max_t(int, val * 2, SOCK_MIN_RCVBUF)); 5055 break; 5056 case SO_SNDBUF: 5057 val = min_t(u32, val, READ_ONCE(sysctl_wmem_max)); 5058 val = min_t(int, val, INT_MAX / 2); 5059 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 5060 WRITE_ONCE(sk->sk_sndbuf, 5061 max_t(int, val * 2, SOCK_MIN_SNDBUF)); 5062 break; 5063 case SO_MAX_PACING_RATE: /* 32bit version */ 5064 if (val != ~0U) 5065 cmpxchg(&sk->sk_pacing_status, 5066 SK_PACING_NONE, 5067 SK_PACING_NEEDED); 5068 sk->sk_max_pacing_rate = (val == ~0U) ? 
5069 ~0UL : (unsigned int)val; 5070 sk->sk_pacing_rate = min(sk->sk_pacing_rate, 5071 sk->sk_max_pacing_rate); 5072 break; 5073 case SO_PRIORITY: 5074 sk->sk_priority = val; 5075 break; 5076 case SO_RCVLOWAT: 5077 if (val < 0) 5078 val = INT_MAX; 5079 if (sk->sk_socket && sk->sk_socket->ops->set_rcvlowat) 5080 ret = sk->sk_socket->ops->set_rcvlowat(sk, val); 5081 else 5082 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); 5083 break; 5084 case SO_MARK: 5085 if (sk->sk_mark != val) { 5086 sk->sk_mark = val; 5087 sk_dst_reset(sk); 5088 } 5089 break; 5090 case SO_BINDTODEVICE: 5091 optlen = min_t(long, optlen, IFNAMSIZ - 1); 5092 strncpy(devname, optval, optlen); 5093 devname[optlen] = 0; 5094 5095 ifindex = 0; 5096 if (devname[0] != '\0') { 5097 struct net_device *dev; 5098 5099 ret = -ENODEV; 5100 5101 net = sock_net(sk); 5102 dev = dev_get_by_name(net, devname); 5103 if (!dev) 5104 break; 5105 ifindex = dev->ifindex; 5106 dev_put(dev); 5107 } 5108 fallthrough; 5109 case SO_BINDTOIFINDEX: 5110 if (optname == SO_BINDTOIFINDEX) 5111 ifindex = val; 5112 ret = sock_bindtoindex(sk, ifindex, false); 5113 break; 5114 case SO_KEEPALIVE: 5115 if (sk->sk_prot->keepalive) 5116 sk->sk_prot->keepalive(sk, valbool); 5117 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 5118 break; 5119 case SO_REUSEPORT: 5120 sk->sk_reuseport = valbool; 5121 break; 5122 case SO_TXREHASH: 5123 if (val < -1 || val > 1) { 5124 ret = -EINVAL; 5125 break; 5126 } 5127 sk->sk_txrehash = (u8)val; 5128 break; 5129 default: 5130 ret = -EINVAL; 5131 } 5132 #ifdef CONFIG_INET 5133 } else if (level == SOL_IP) { 5134 if (optlen != sizeof(int) || sk->sk_family != AF_INET) 5135 return -EINVAL; 5136 5137 val = *((int *)optval); 5138 /* Only some options are supported */ 5139 switch (optname) { 5140 case IP_TOS: 5141 if (val < -1 || val > 0xff) { 5142 ret = -EINVAL; 5143 } else { 5144 struct inet_sock *inet = inet_sk(sk); 5145 5146 if (val == -1) 5147 val = 0; 5148 inet->tos = val; 5149 } 5150 break; 5151 default: 5152 ret = -EINVAL; 5153 } 5154 #if IS_ENABLED(CONFIG_IPV6) 5155 } else if (level == SOL_IPV6) { 5156 if (optlen != sizeof(int) || sk->sk_family != AF_INET6) 5157 return -EINVAL; 5158 5159 val = *((int *)optval); 5160 /* Only some options are supported */ 5161 switch (optname) { 5162 case IPV6_TCLASS: 5163 if (val < -1 || val > 0xff) { 5164 ret = -EINVAL; 5165 } else { 5166 struct ipv6_pinfo *np = inet6_sk(sk); 5167 5168 if (val == -1) 5169 val = 0; 5170 np->tclass = val; 5171 } 5172 break; 5173 default: 5174 ret = -EINVAL; 5175 } 5176 #endif 5177 } else if (level == SOL_TCP && 5178 sk->sk_prot->setsockopt == tcp_setsockopt) { 5179 if (optname == TCP_CONGESTION) { 5180 char name[TCP_CA_NAME_MAX]; 5181 5182 strncpy(name, optval, min_t(long, optlen, 5183 TCP_CA_NAME_MAX-1)); 5184 name[TCP_CA_NAME_MAX-1] = 0; 5185 ret = tcp_set_congestion_control(sk, name, false, true); 5186 } else { 5187 struct inet_connection_sock *icsk = inet_csk(sk); 5188 struct tcp_sock *tp = tcp_sk(sk); 5189 unsigned long timeout; 5190 5191 if (optlen != sizeof(int)) 5192 return -EINVAL; 5193 5194 val = *((int *)optval); 5195 /* Only some options are supported */ 5196 switch (optname) { 5197 case TCP_BPF_IW: 5198 if (val <= 0 || tp->data_segs_out > tp->syn_data) 5199 ret = -EINVAL; 5200 else 5201 tcp_snd_cwnd_set(tp, val); 5202 break; 5203 case TCP_BPF_SNDCWND_CLAMP: 5204 if (val <= 0) { 5205 ret = -EINVAL; 5206 } else { 5207 tp->snd_cwnd_clamp = val; 5208 tp->snd_ssthresh = val; 5209 } 5210 break; 5211 case TCP_BPF_DELACK_MAX: 5212 timeout = usecs_to_jiffies(val); 5213 
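/* val is in usecs; reject values that fall outside
* [TCP_TIMEOUT_MIN, TCP_DELACK_MAX] once converted to jiffies.
*/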
if (timeout > TCP_DELACK_MAX || 5214 timeout < TCP_TIMEOUT_MIN) 5215 return -EINVAL; 5216 inet_csk(sk)->icsk_delack_max = timeout; 5217 break; 5218 case TCP_BPF_RTO_MIN: 5219 timeout = usecs_to_jiffies(val); 5220 if (timeout > TCP_RTO_MIN || 5221 timeout < TCP_TIMEOUT_MIN) 5222 return -EINVAL; 5223 inet_csk(sk)->icsk_rto_min = timeout; 5224 break; 5225 case TCP_SAVE_SYN: 5226 if (val < 0 || val > 1) 5227 ret = -EINVAL; 5228 else 5229 tp->save_syn = val; 5230 break; 5231 case TCP_KEEPIDLE: 5232 ret = tcp_sock_set_keepidle_locked(sk, val); 5233 break; 5234 case TCP_KEEPINTVL: 5235 if (val < 1 || val > MAX_TCP_KEEPINTVL) 5236 ret = -EINVAL; 5237 else 5238 tp->keepalive_intvl = val * HZ; 5239 break; 5240 case TCP_KEEPCNT: 5241 if (val < 1 || val > MAX_TCP_KEEPCNT) 5242 ret = -EINVAL; 5243 else 5244 tp->keepalive_probes = val; 5245 break; 5246 case TCP_SYNCNT: 5247 if (val < 1 || val > MAX_TCP_SYNCNT) 5248 ret = -EINVAL; 5249 else 5250 icsk->icsk_syn_retries = val; 5251 break; 5252 case TCP_USER_TIMEOUT: 5253 if (val < 0) 5254 ret = -EINVAL; 5255 else 5256 icsk->icsk_user_timeout = val; 5257 break; 5258 case TCP_NOTSENT_LOWAT: 5259 tp->notsent_lowat = val; 5260 sk->sk_write_space(sk); 5261 break; 5262 case TCP_WINDOW_CLAMP: 5263 ret = tcp_set_window_clamp(sk, val); 5264 break; 5265 default: 5266 ret = -EINVAL; 5267 } 5268 } 5269 #endif 5270 } else { 5271 ret = -EINVAL; 5272 } 5273 return ret; 5274 } 5275 5276 static int _bpf_setsockopt(struct sock *sk, int level, int optname, 5277 char *optval, int optlen) 5278 { 5279 if (sk_fullsock(sk)) 5280 sock_owned_by_me(sk); 5281 return __bpf_setsockopt(sk, level, optname, optval, optlen); 5282 } 5283 5284 static int __bpf_getsockopt(struct sock *sk, int level, int optname, 5285 char *optval, int optlen) 5286 { 5287 if (!sk_fullsock(sk)) 5288 goto err_clear; 5289 5290 if (level == SOL_SOCKET) { 5291 if (optlen != sizeof(int)) 5292 goto err_clear; 5293 5294 switch (optname) { 5295 case SO_RCVBUF: 5296 *((int *)optval) = sk->sk_rcvbuf; 5297 break; 5298 case SO_SNDBUF: 5299 *((int *)optval) = sk->sk_sndbuf; 5300 break; 5301 case SO_MARK: 5302 *((int *)optval) = sk->sk_mark; 5303 break; 5304 case SO_PRIORITY: 5305 *((int *)optval) = sk->sk_priority; 5306 break; 5307 case SO_BINDTOIFINDEX: 5308 *((int *)optval) = sk->sk_bound_dev_if; 5309 break; 5310 case SO_REUSEPORT: 5311 *((int *)optval) = sk->sk_reuseport; 5312 break; 5313 case SO_TXREHASH: 5314 *((int *)optval) = sk->sk_txrehash; 5315 break; 5316 default: 5317 goto err_clear; 5318 } 5319 #ifdef CONFIG_INET 5320 } else if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { 5321 struct inet_connection_sock *icsk; 5322 struct tcp_sock *tp; 5323 5324 switch (optname) { 5325 case TCP_CONGESTION: 5326 icsk = inet_csk(sk); 5327 5328 if (!icsk->icsk_ca_ops || optlen <= 1) 5329 goto err_clear; 5330 strncpy(optval, icsk->icsk_ca_ops->name, optlen); 5331 optval[optlen - 1] = 0; 5332 break; 5333 case TCP_SAVED_SYN: 5334 tp = tcp_sk(sk); 5335 5336 if (optlen <= 0 || !tp->saved_syn || 5337 optlen > tcp_saved_syn_len(tp->saved_syn)) 5338 goto err_clear; 5339 memcpy(optval, tp->saved_syn->data, optlen); 5340 break; 5341 default: 5342 goto err_clear; 5343 } 5344 } else if (level == SOL_IP) { 5345 struct inet_sock *inet = inet_sk(sk); 5346 5347 if (optlen != sizeof(int) || sk->sk_family != AF_INET) 5348 goto err_clear; 5349 5350 /* Only some options are supported */ 5351 switch (optname) { 5352 case IP_TOS: 5353 *((int *)optval) = (int)inet->tos; 5354 break; 5355 default: 5356 goto err_clear; 5357 } 5358 #if 
IS_ENABLED(CONFIG_IPV6) 5359 } else if (level == SOL_IPV6) { 5360 struct ipv6_pinfo *np = inet6_sk(sk); 5361 5362 if (optlen != sizeof(int) || sk->sk_family != AF_INET6) 5363 goto err_clear; 5364 5365 /* Only some options are supported */ 5366 switch (optname) { 5367 case IPV6_TCLASS: 5368 *((int *)optval) = (int)np->tclass; 5369 break; 5370 default: 5371 goto err_clear; 5372 } 5373 #endif 5374 #endif 5375 } else { 5376 goto err_clear; 5377 } 5378 return 0; 5379 err_clear: 5380 memset(optval, 0, optlen); 5381 return -EINVAL; 5382 } 5383 5384 static int _bpf_getsockopt(struct sock *sk, int level, int optname, 5385 char *optval, int optlen) 5386 { 5387 if (sk_fullsock(sk)) 5388 sock_owned_by_me(sk); 5389 return __bpf_getsockopt(sk, level, optname, optval, optlen); 5390 } 5391 5392 BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level, 5393 int, optname, char *, optval, int, optlen) 5394 { 5395 if (level == SOL_TCP && optname == TCP_CONGESTION) { 5396 if (optlen >= sizeof("cdg") - 1 && 5397 !strncmp("cdg", optval, optlen)) 5398 return -ENOTSUPP; 5399 } 5400 5401 return _bpf_setsockopt(sk, level, optname, optval, optlen); 5402 } 5403 5404 const struct bpf_func_proto bpf_sk_setsockopt_proto = { 5405 .func = bpf_sk_setsockopt, 5406 .gpl_only = false, 5407 .ret_type = RET_INTEGER, 5408 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5409 .arg2_type = ARG_ANYTHING, 5410 .arg3_type = ARG_ANYTHING, 5411 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5412 .arg5_type = ARG_CONST_SIZE, 5413 }; 5414 5415 BPF_CALL_5(bpf_sk_getsockopt, struct sock *, sk, int, level, 5416 int, optname, char *, optval, int, optlen) 5417 { 5418 return _bpf_getsockopt(sk, level, optname, optval, optlen); 5419 } 5420 5421 const struct bpf_func_proto bpf_sk_getsockopt_proto = { 5422 .func = bpf_sk_getsockopt, 5423 .gpl_only = false, 5424 .ret_type = RET_INTEGER, 5425 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5426 .arg2_type = ARG_ANYTHING, 5427 .arg3_type = ARG_ANYTHING, 5428 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5429 .arg5_type = ARG_CONST_SIZE, 5430 }; 5431 5432 BPF_CALL_5(bpf_unlocked_sk_setsockopt, struct sock *, sk, int, level, 5433 int, optname, char *, optval, int, optlen) 5434 { 5435 return __bpf_setsockopt(sk, level, optname, optval, optlen); 5436 } 5437 5438 const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto = { 5439 .func = bpf_unlocked_sk_setsockopt, 5440 .gpl_only = false, 5441 .ret_type = RET_INTEGER, 5442 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5443 .arg2_type = ARG_ANYTHING, 5444 .arg3_type = ARG_ANYTHING, 5445 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5446 .arg5_type = ARG_CONST_SIZE, 5447 }; 5448 5449 BPF_CALL_5(bpf_unlocked_sk_getsockopt, struct sock *, sk, int, level, 5450 int, optname, char *, optval, int, optlen) 5451 { 5452 return __bpf_getsockopt(sk, level, optname, optval, optlen); 5453 } 5454 5455 const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto = { 5456 .func = bpf_unlocked_sk_getsockopt, 5457 .gpl_only = false, 5458 .ret_type = RET_INTEGER, 5459 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, 5460 .arg2_type = ARG_ANYTHING, 5461 .arg3_type = ARG_ANYTHING, 5462 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5463 .arg5_type = ARG_CONST_SIZE, 5464 }; 5465 5466 BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx, 5467 int, level, int, optname, char *, optval, int, optlen) 5468 { 5469 return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen); 5470 } 5471 5472 static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = { 5473 .func = bpf_sock_addr_setsockopt, 5474 
.gpl_only = false, 5475 .ret_type = RET_INTEGER, 5476 .arg1_type = ARG_PTR_TO_CTX, 5477 .arg2_type = ARG_ANYTHING, 5478 .arg3_type = ARG_ANYTHING, 5479 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5480 .arg5_type = ARG_CONST_SIZE, 5481 }; 5482 5483 BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx, 5484 int, level, int, optname, char *, optval, int, optlen) 5485 { 5486 return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen); 5487 } 5488 5489 static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = { 5490 .func = bpf_sock_addr_getsockopt, 5491 .gpl_only = false, 5492 .ret_type = RET_INTEGER, 5493 .arg1_type = ARG_PTR_TO_CTX, 5494 .arg2_type = ARG_ANYTHING, 5495 .arg3_type = ARG_ANYTHING, 5496 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5497 .arg5_type = ARG_CONST_SIZE, 5498 }; 5499 5500 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5501 int, level, int, optname, char *, optval, int, optlen) 5502 { 5503 return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen); 5504 } 5505 5506 static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = { 5507 .func = bpf_sock_ops_setsockopt, 5508 .gpl_only = false, 5509 .ret_type = RET_INTEGER, 5510 .arg1_type = ARG_PTR_TO_CTX, 5511 .arg2_type = ARG_ANYTHING, 5512 .arg3_type = ARG_ANYTHING, 5513 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5514 .arg5_type = ARG_CONST_SIZE, 5515 }; 5516 5517 static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock, 5518 int optname, const u8 **start) 5519 { 5520 struct sk_buff *syn_skb = bpf_sock->syn_skb; 5521 const u8 *hdr_start; 5522 int ret; 5523 5524 if (syn_skb) { 5525 /* sk is a request_sock here */ 5526 5527 if (optname == TCP_BPF_SYN) { 5528 hdr_start = syn_skb->data; 5529 ret = tcp_hdrlen(syn_skb); 5530 } else if (optname == TCP_BPF_SYN_IP) { 5531 hdr_start = skb_network_header(syn_skb); 5532 ret = skb_network_header_len(syn_skb) + 5533 tcp_hdrlen(syn_skb); 5534 } else { 5535 /* optname == TCP_BPF_SYN_MAC */ 5536 hdr_start = skb_mac_header(syn_skb); 5537 ret = skb_mac_header_len(syn_skb) + 5538 skb_network_header_len(syn_skb) + 5539 tcp_hdrlen(syn_skb); 5540 } 5541 } else { 5542 struct sock *sk = bpf_sock->sk; 5543 struct saved_syn *saved_syn; 5544 5545 if (sk->sk_state == TCP_NEW_SYN_RECV) 5546 /* synack retransmit. bpf_sock->syn_skb will 5547 * not be available. It has to resort to 5548 * saved_syn (if it is saved). 
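* In the TCP_NEW_SYN_RECV case below, sk is still a request_sock,
* so the saved SYN (if any) lives in inet_reqsk(sk)->saved_syn
* rather than in tcp_sk(sk).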
5549 */ 5550 saved_syn = inet_reqsk(sk)->saved_syn; 5551 else 5552 saved_syn = tcp_sk(sk)->saved_syn; 5553 5554 if (!saved_syn) 5555 return -ENOENT; 5556 5557 if (optname == TCP_BPF_SYN) { 5558 hdr_start = saved_syn->data + 5559 saved_syn->mac_hdrlen + 5560 saved_syn->network_hdrlen; 5561 ret = saved_syn->tcp_hdrlen; 5562 } else if (optname == TCP_BPF_SYN_IP) { 5563 hdr_start = saved_syn->data + 5564 saved_syn->mac_hdrlen; 5565 ret = saved_syn->network_hdrlen + 5566 saved_syn->tcp_hdrlen; 5567 } else { 5568 /* optname == TCP_BPF_SYN_MAC */ 5569 5570 /* TCP_SAVE_SYN may not have saved the mac hdr */ 5571 if (!saved_syn->mac_hdrlen) 5572 return -ENOENT; 5573 5574 hdr_start = saved_syn->data; 5575 ret = saved_syn->mac_hdrlen + 5576 saved_syn->network_hdrlen + 5577 saved_syn->tcp_hdrlen; 5578 } 5579 } 5580 5581 *start = hdr_start; 5582 return ret; 5583 } 5584 5585 BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, 5586 int, level, int, optname, char *, optval, int, optlen) 5587 { 5588 if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP && 5589 optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) { 5590 int ret, copy_len = 0; 5591 const u8 *start; 5592 5593 ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start); 5594 if (ret > 0) { 5595 copy_len = ret; 5596 if (optlen < copy_len) { 5597 copy_len = optlen; 5598 ret = -ENOSPC; 5599 } 5600 5601 memcpy(optval, start, copy_len); 5602 } 5603 5604 /* Zero out unused buffer at the end */ 5605 memset(optval + copy_len, 0, optlen - copy_len); 5606 5607 return ret; 5608 } 5609 5610 return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen); 5611 } 5612 5613 static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = { 5614 .func = bpf_sock_ops_getsockopt, 5615 .gpl_only = false, 5616 .ret_type = RET_INTEGER, 5617 .arg1_type = ARG_PTR_TO_CTX, 5618 .arg2_type = ARG_ANYTHING, 5619 .arg3_type = ARG_ANYTHING, 5620 .arg4_type = ARG_PTR_TO_UNINIT_MEM, 5621 .arg5_type = ARG_CONST_SIZE, 5622 }; 5623 5624 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, 5625 int, argval) 5626 { 5627 struct sock *sk = bpf_sock->sk; 5628 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; 5629 5630 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) 5631 return -EINVAL; 5632 5633 tcp_sk(sk)->bpf_sock_ops_cb_flags = val; 5634 5635 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); 5636 } 5637 5638 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { 5639 .func = bpf_sock_ops_cb_flags_set, 5640 .gpl_only = false, 5641 .ret_type = RET_INTEGER, 5642 .arg1_type = ARG_PTR_TO_CTX, 5643 .arg2_type = ARG_ANYTHING, 5644 }; 5645 5646 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; 5647 EXPORT_SYMBOL_GPL(ipv6_bpf_stub); 5648 5649 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, 5650 int, addr_len) 5651 { 5652 #ifdef CONFIG_INET 5653 struct sock *sk = ctx->sk; 5654 u32 flags = BIND_FROM_BPF; 5655 int err; 5656 5657 err = -EINVAL; 5658 if (addr_len < offsetofend(struct sockaddr, sa_family)) 5659 return err; 5660 if (addr->sa_family == AF_INET) { 5661 if (addr_len < sizeof(struct sockaddr_in)) 5662 return err; 5663 if (((struct sockaddr_in *)addr)->sin_port == htons(0)) 5664 flags |= BIND_FORCE_ADDRESS_NO_PORT; 5665 return __inet_bind(sk, addr, addr_len, flags); 5666 #if IS_ENABLED(CONFIG_IPV6) 5667 } else if (addr->sa_family == AF_INET6) { 5668 if (addr_len < SIN6_LEN_RFC2133) 5669 return err; 5670 if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0)) 5671 flags |= 
BIND_FORCE_ADDRESS_NO_PORT; 5672 /* ipv6_bpf_stub cannot be NULL, since it's called from 5673 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded 5674 */ 5675 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags); 5676 #endif /* CONFIG_IPV6 */ 5677 } 5678 #endif /* CONFIG_INET */ 5679 5680 return -EAFNOSUPPORT; 5681 } 5682 5683 static const struct bpf_func_proto bpf_bind_proto = { 5684 .func = bpf_bind, 5685 .gpl_only = false, 5686 .ret_type = RET_INTEGER, 5687 .arg1_type = ARG_PTR_TO_CTX, 5688 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5689 .arg3_type = ARG_CONST_SIZE, 5690 }; 5691 5692 #ifdef CONFIG_XFRM 5693 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index, 5694 struct bpf_xfrm_state *, to, u32, size, u64, flags) 5695 { 5696 const struct sec_path *sp = skb_sec_path(skb); 5697 const struct xfrm_state *x; 5698 5699 if (!sp || unlikely(index >= sp->len || flags)) 5700 goto err_clear; 5701 5702 x = sp->xvec[index]; 5703 5704 if (unlikely(size != sizeof(struct bpf_xfrm_state))) 5705 goto err_clear; 5706 5707 to->reqid = x->props.reqid; 5708 to->spi = x->id.spi; 5709 to->family = x->props.family; 5710 to->ext = 0; 5711 5712 if (to->family == AF_INET6) { 5713 memcpy(to->remote_ipv6, x->props.saddr.a6, 5714 sizeof(to->remote_ipv6)); 5715 } else { 5716 to->remote_ipv4 = x->props.saddr.a4; 5717 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); 5718 } 5719 5720 return 0; 5721 err_clear: 5722 memset(to, 0, size); 5723 return -EINVAL; 5724 } 5725 5726 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { 5727 .func = bpf_skb_get_xfrm_state, 5728 .gpl_only = false, 5729 .ret_type = RET_INTEGER, 5730 .arg1_type = ARG_PTR_TO_CTX, 5731 .arg2_type = ARG_ANYTHING, 5732 .arg3_type = ARG_PTR_TO_UNINIT_MEM, 5733 .arg4_type = ARG_CONST_SIZE, 5734 .arg5_type = ARG_ANYTHING, 5735 }; 5736 #endif 5737 5738 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) 5739 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, 5740 const struct neighbour *neigh, 5741 const struct net_device *dev, u32 mtu) 5742 { 5743 memcpy(params->dmac, neigh->ha, ETH_ALEN); 5744 memcpy(params->smac, dev->dev_addr, ETH_ALEN); 5745 params->h_vlan_TCI = 0; 5746 params->h_vlan_proto = 0; 5747 if (mtu) 5748 params->mtu_result = mtu; /* union with tot_len */ 5749 5750 return 0; 5751 } 5752 #endif 5753 5754 #if IS_ENABLED(CONFIG_INET) 5755 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5756 u32 flags, bool check_mtu) 5757 { 5758 struct fib_nh_common *nhc; 5759 struct in_device *in_dev; 5760 struct neighbour *neigh; 5761 struct net_device *dev; 5762 struct fib_result res; 5763 struct flowi4 fl4; 5764 u32 mtu = 0; 5765 int err; 5766 5767 dev = dev_get_by_index_rcu(net, params->ifindex); 5768 if (unlikely(!dev)) 5769 return -ENODEV; 5770 5771 /* verify forwarding is enabled on this interface */ 5772 in_dev = __in_dev_get_rcu(dev); 5773 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) 5774 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5775 5776 if (flags & BPF_FIB_LOOKUP_OUTPUT) { 5777 fl4.flowi4_iif = 1; 5778 fl4.flowi4_oif = params->ifindex; 5779 } else { 5780 fl4.flowi4_iif = params->ifindex; 5781 fl4.flowi4_oif = 0; 5782 } 5783 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK; 5784 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 5785 fl4.flowi4_flags = 0; 5786 5787 fl4.flowi4_proto = params->l4_protocol; 5788 fl4.daddr = params->ipv4_dst; 5789 fl4.saddr = params->ipv4_src; 5790 fl4.fl4_sport = params->sport; 5791 fl4.fl4_dport = params->dport; 5792 fl4.flowi4_multipath_hash = 
0; 5793 5794 if (flags & BPF_FIB_LOOKUP_DIRECT) { 5795 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 5796 struct fib_table *tb; 5797 5798 tb = fib_get_table(net, tbid); 5799 if (unlikely(!tb)) 5800 return BPF_FIB_LKUP_RET_NOT_FWDED; 5801 5802 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); 5803 } else { 5804 fl4.flowi4_mark = 0; 5805 fl4.flowi4_secid = 0; 5806 fl4.flowi4_tun_key.tun_id = 0; 5807 fl4.flowi4_uid = sock_net_uid(net, NULL); 5808 5809 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); 5810 } 5811 5812 if (err) { 5813 /* map fib lookup errors to RTN_ type */ 5814 if (err == -EINVAL) 5815 return BPF_FIB_LKUP_RET_BLACKHOLE; 5816 if (err == -EHOSTUNREACH) 5817 return BPF_FIB_LKUP_RET_UNREACHABLE; 5818 if (err == -EACCES) 5819 return BPF_FIB_LKUP_RET_PROHIBIT; 5820 5821 return BPF_FIB_LKUP_RET_NOT_FWDED; 5822 } 5823 5824 if (res.type != RTN_UNICAST) 5825 return BPF_FIB_LKUP_RET_NOT_FWDED; 5826 5827 if (fib_info_num_path(res.fi) > 1) 5828 fib_select_path(net, &res, &fl4, NULL); 5829 5830 if (check_mtu) { 5831 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); 5832 if (params->tot_len > mtu) { 5833 params->mtu_result = mtu; /* union with tot_len */ 5834 return BPF_FIB_LKUP_RET_FRAG_NEEDED; 5835 } 5836 } 5837 5838 nhc = res.nhc; 5839 5840 /* do not handle lwt encaps right now */ 5841 if (nhc->nhc_lwtstate) 5842 return BPF_FIB_LKUP_RET_UNSUPP_LWT; 5843 5844 dev = nhc->nhc_dev; 5845 5846 params->rt_metric = res.fi->fib_priority; 5847 params->ifindex = dev->ifindex; 5848 5849 /* xdp and cls_bpf programs are run in RCU-bh so 5850 * rcu_read_lock_bh is not needed here 5851 */ 5852 if (likely(nhc->nhc_gw_family != AF_INET6)) { 5853 if (nhc->nhc_gw_family) 5854 params->ipv4_dst = nhc->nhc_gw.ipv4; 5855 5856 neigh = __ipv4_neigh_lookup_noref(dev, 5857 (__force u32)params->ipv4_dst); 5858 } else { 5859 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; 5860 5861 params->family = AF_INET6; 5862 *dst = nhc->nhc_gw.ipv6; 5863 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); 5864 } 5865 5866 if (!neigh) 5867 return BPF_FIB_LKUP_RET_NO_NEIGH; 5868 5869 return bpf_fib_set_fwd_params(params, neigh, dev, mtu); 5870 } 5871 #endif 5872 5873 #if IS_ENABLED(CONFIG_IPV6) 5874 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, 5875 u32 flags, bool check_mtu) 5876 { 5877 struct in6_addr *src = (struct in6_addr *) params->ipv6_src; 5878 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst; 5879 struct fib6_result res = {}; 5880 struct neighbour *neigh; 5881 struct net_device *dev; 5882 struct inet6_dev *idev; 5883 struct flowi6 fl6; 5884 int strict = 0; 5885 int oif, err; 5886 u32 mtu = 0; 5887 5888 /* link local addresses are never forwarded */ 5889 if (rt6_need_strict(dst) || rt6_need_strict(src)) 5890 return BPF_FIB_LKUP_RET_NOT_FWDED; 5891 5892 dev = dev_get_by_index_rcu(net,