TOMOYO Linux Cross Reference
Linux/kernel/bpf/sockmap.c

  1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  2  *
  3  * This program is free software; you can redistribute it and/or
  4  * modify it under the terms of version 2 of the GNU General Public
  5  * License as published by the Free Software Foundation.
  6  *
  7  * This program is distributed in the hope that it will be useful, but
  8  * WITHOUT ANY WARRANTY; without even the implied warranty of
  9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 10  * General Public License for more details.
 11  */
 12 
  13 /* A BPF sock_map is used to store sock objects. This is primarily used
 14  * for doing socket redirect with BPF helper routines.
 15  *
  16  * A sock map may have BPF programs attached to it; currently a program
 17  * used to parse packets and a program to provide a verdict and redirect
 18  * decision on the packet are supported. Any programs attached to a sock
 19  * map are inherited by sock objects when they are added to the map. If
 20  * no BPF programs are attached the sock object may only be used for sock
 21  * redirect.
 22  *
 23  * A sock object may be in multiple maps, but can only inherit a single
 24  * parse or verdict program. If adding a sock object to a map would result
  25  * in having multiple parsing programs, the update will return an EBUSY error.
 26  *
  27  * For reference, this code is similar to the devmap used in the XDP context;
  28  * reviewing the two together may be useful. For an example please review
  29  * ./samples/bpf/sockmap/.
 30  */
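
/* Illustrative sketch (not part of this file): a minimal SOCKMAP definition
 * and sk_skb verdict program, loosely modelled on ./samples/bpf/sockmap/.
 * Map, program and section names below are illustrative assumptions only:
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type		= BPF_MAP_TYPE_SOCKMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 20,
 *	};
 *
 *	SEC("sk_skb/stream_verdict")
 *	int bpf_prog_verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 *
 * User space would attach the parser and verdict programs to the map fd with
 * bpf_prog_attach() using BPF_SK_SKB_STREAM_PARSER and
 * BPF_SK_SKB_STREAM_VERDICT, then add sockets with bpf_map_update_elem().
 */
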
 31 #include <linux/bpf.h>
 32 #include <net/sock.h>
 33 #include <linux/filter.h>
 34 #include <linux/errno.h>
 35 #include <linux/file.h>
 36 #include <linux/kernel.h>
 37 #include <linux/net.h>
 38 #include <linux/skbuff.h>
 39 #include <linux/workqueue.h>
 40 #include <linux/list.h>
 41 #include <linux/mm.h>
 42 #include <net/strparser.h>
 43 #include <net/tcp.h>
 44 #include <linux/ptr_ring.h>
 45 #include <net/inet_common.h>
 46 #include <linux/sched/signal.h>
 47 
 48 #define SOCK_CREATE_FLAG_MASK \
 49         (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 50 
 51 struct bpf_stab {
 52         struct bpf_map map;
 53         struct sock **sock_map;
 54         struct bpf_prog *bpf_tx_msg;
 55         struct bpf_prog *bpf_parse;
 56         struct bpf_prog *bpf_verdict;
 57 };
 58 
 59 enum smap_psock_state {
 60         SMAP_TX_RUNNING,
 61 };
 62 
 63 struct smap_psock_map_entry {
 64         struct list_head list;
 65         struct sock **entry;
 66 };
 67 
 68 struct smap_psock {
 69         struct rcu_head rcu;
 70         refcount_t refcnt;
 71 
 72         /* datapath variables */
 73         struct sk_buff_head rxqueue;
 74         bool strp_enabled;
 75 
 76         /* datapath error path cache across tx work invocations */
 77         int save_rem;
 78         int save_off;
 79         struct sk_buff *save_skb;
 80 
 81         /* datapath variables for tx_msg ULP */
 82         struct sock *sk_redir;
 83         int apply_bytes;
 84         int cork_bytes;
 85         int sg_size;
 86         int eval;
 87         struct sk_msg_buff *cork;
 88         struct list_head ingress;
 89 
 90         struct strparser strp;
 91         struct bpf_prog *bpf_tx_msg;
 92         struct bpf_prog *bpf_parse;
 93         struct bpf_prog *bpf_verdict;
 94         struct list_head maps;
 95 
  96         /* Back reference used when sock callbacks trigger sockmap operations */
 97         struct sock *sock;
 98         unsigned long state;
 99 
100         struct work_struct tx_work;
101         struct work_struct gc_work;
102 
103         struct proto *sk_proto;
104         void (*save_close)(struct sock *sk, long timeout);
105         void (*save_data_ready)(struct sock *sk);
106         void (*save_write_space)(struct sock *sk);
107 };
108 
109 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
110 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
111                            int nonblock, int flags, int *addr_len);
112 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
113 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
114                             int offset, size_t size, int flags);
115 static void bpf_tcp_close(struct sock *sk, long timeout);
116 
117 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
118 {
119         return rcu_dereference_sk_user_data(sk);
120 }
121 
122 static bool bpf_tcp_stream_read(const struct sock *sk)
123 {
124         struct smap_psock *psock;
125         bool empty = true;
126 
127         rcu_read_lock();
128         psock = smap_psock_sk(sk);
129         if (unlikely(!psock))
130                 goto out;
131         empty = list_empty(&psock->ingress);
132 out:
133         rcu_read_unlock();
134         return !empty;
135 }
136 
137 enum {
138         SOCKMAP_IPV4,
139         SOCKMAP_IPV6,
140         SOCKMAP_NUM_PROTS,
141 };
142 
143 enum {
144         SOCKMAP_BASE,
145         SOCKMAP_TX,
146         SOCKMAP_NUM_CONFIGS,
147 };
148 
149 static struct proto *saved_tcpv6_prot __read_mostly;
150 static DEFINE_SPINLOCK(tcpv6_prot_lock);
151 static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
152 static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
153                          struct proto *base)
154 {
155         prot[SOCKMAP_BASE]                      = *base;
156         prot[SOCKMAP_BASE].close                = bpf_tcp_close;
157         prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
158         prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
159 
160         prot[SOCKMAP_TX]                        = prot[SOCKMAP_BASE];
161         prot[SOCKMAP_TX].sendmsg                = bpf_tcp_sendmsg;
162         prot[SOCKMAP_TX].sendpage               = bpf_tcp_sendpage;
163 }
164 
165 static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
166 {
167         int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
168         int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
169 
170         sk->sk_prot = &bpf_tcp_prots[family][conf];
171 }
172 
173 static int bpf_tcp_init(struct sock *sk)
174 {
175         struct smap_psock *psock;
176 
177         rcu_read_lock();
178         psock = smap_psock_sk(sk);
179         if (unlikely(!psock)) {
180                 rcu_read_unlock();
181                 return -EINVAL;
182         }
183 
184         if (unlikely(psock->sk_proto)) {
185                 rcu_read_unlock();
186                 return -EBUSY;
187         }
188 
189         psock->save_close = sk->sk_prot->close;
190         psock->sk_proto = sk->sk_prot;
191 
192         /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
193         if (sk->sk_family == AF_INET6 &&
194             unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
195                 spin_lock_bh(&tcpv6_prot_lock);
196                 if (likely(sk->sk_prot != saved_tcpv6_prot)) {
197                         build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
198                         smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
199                 }
200                 spin_unlock_bh(&tcpv6_prot_lock);
201         }
202         update_sk_prot(sk, psock);
203         rcu_read_unlock();
204         return 0;
205 }
206 
207 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
208 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
209 
210 static void bpf_tcp_release(struct sock *sk)
211 {
212         struct smap_psock *psock;
213 
214         rcu_read_lock();
215         psock = smap_psock_sk(sk);
216         if (unlikely(!psock))
217                 goto out;
218 
219         if (psock->cork) {
220                 free_start_sg(psock->sock, psock->cork);
221                 kfree(psock->cork);
222                 psock->cork = NULL;
223         }
224 
225         if (psock->sk_proto) {
226                 sk->sk_prot = psock->sk_proto;
227                 psock->sk_proto = NULL;
228         }
229 out:
230         rcu_read_unlock();
231 }
232 
233 static void bpf_tcp_close(struct sock *sk, long timeout)
234 {
235         void (*close_fun)(struct sock *sk, long timeout);
236         struct smap_psock_map_entry *e, *tmp;
237         struct sk_msg_buff *md, *mtmp;
238         struct smap_psock *psock;
239         struct sock *osk;
240 
241         rcu_read_lock();
242         psock = smap_psock_sk(sk);
243         if (unlikely(!psock)) {
244                 rcu_read_unlock();
245                 return sk->sk_prot->close(sk, timeout);
246         }
247 
 248         /* The psock may be destroyed anytime after exiting the RCU critical
249          * section so by the time we use close_fun the psock may no longer
250          * be valid. However, bpf_tcp_close is called with the sock lock
251          * held so the close hook and sk are still valid.
252          */
253         close_fun = psock->save_close;
254 
255         write_lock_bh(&sk->sk_callback_lock);
256         if (psock->cork) {
257                 free_start_sg(psock->sock, psock->cork);
258                 kfree(psock->cork);
259                 psock->cork = NULL;
260         }
261 
262         list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
263                 list_del(&md->list);
264                 free_start_sg(psock->sock, md);
265                 kfree(md);
266         }
267 
268         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
269                 osk = cmpxchg(e->entry, sk, NULL);
270                 if (osk == sk) {
271                         list_del(&e->list);
272                         smap_release_sock(psock, sk);
273                 }
274         }
275         write_unlock_bh(&sk->sk_callback_lock);
276         rcu_read_unlock();
277         close_fun(sk, timeout);
278 }
279 
280 enum __sk_action {
281         __SK_DROP = 0,
282         __SK_PASS,
283         __SK_REDIRECT,
284         __SK_NONE,
285 };
286 
287 static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
288         .name           = "bpf_tcp",
289         .uid            = TCP_ULP_BPF,
290         .user_visible   = false,
291         .owner          = NULL,
292         .init           = bpf_tcp_init,
293         .release        = bpf_tcp_release,
294 };
295 
296 static int memcopy_from_iter(struct sock *sk,
297                              struct sk_msg_buff *md,
298                              struct iov_iter *from, int bytes)
299 {
300         struct scatterlist *sg = md->sg_data;
301         int i = md->sg_curr, rc = -ENOSPC;
302 
303         do {
304                 int copy;
305                 char *to;
306 
307                 if (md->sg_copybreak >= sg[i].length) {
308                         md->sg_copybreak = 0;
309 
310                         if (++i == MAX_SKB_FRAGS)
311                                 i = 0;
312 
313                         if (i == md->sg_end)
314                                 break;
315                 }
316 
317                 copy = sg[i].length - md->sg_copybreak;
318                 to = sg_virt(&sg[i]) + md->sg_copybreak;
319                 md->sg_copybreak += copy;
320 
321                 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
322                         rc = copy_from_iter_nocache(to, copy, from);
323                 else
324                         rc = copy_from_iter(to, copy, from);
325 
326                 if (rc != copy) {
327                         rc = -EFAULT;
328                         goto out;
329                 }
330 
331                 bytes -= copy;
332                 if (!bytes)
333                         break;
334 
335                 md->sg_copybreak = 0;
336                 if (++i == MAX_SKB_FRAGS)
337                         i = 0;
338         } while (i != md->sg_end);
339 out:
340         md->sg_curr = i;
341         return rc;
342 }
343 
344 static int bpf_tcp_push(struct sock *sk, int apply_bytes,
345                         struct sk_msg_buff *md,
346                         int flags, bool uncharge)
347 {
348         bool apply = apply_bytes;
349         struct scatterlist *sg;
350         int offset, ret = 0;
351         struct page *p;
352         size_t size;
353 
354         while (1) {
355                 sg = md->sg_data + md->sg_start;
356                 size = (apply && apply_bytes < sg->length) ?
357                         apply_bytes : sg->length;
358                 offset = sg->offset;
359 
360                 tcp_rate_check_app_limited(sk);
361                 p = sg_page(sg);
362 retry:
363                 ret = do_tcp_sendpages(sk, p, offset, size, flags);
364                 if (ret != size) {
365                         if (ret > 0) {
366                                 if (apply)
367                                         apply_bytes -= ret;
368 
369                                 sg->offset += ret;
370                                 sg->length -= ret;
371                                 size -= ret;
372                                 offset += ret;
373                                 if (uncharge)
374                                         sk_mem_uncharge(sk, ret);
375                                 goto retry;
376                         }
377 
378                         return ret;
379                 }
380 
381                 if (apply)
382                         apply_bytes -= ret;
383                 sg->offset += ret;
384                 sg->length -= ret;
385                 if (uncharge)
386                         sk_mem_uncharge(sk, ret);
387 
388                 if (!sg->length) {
389                         put_page(p);
390                         md->sg_start++;
391                         if (md->sg_start == MAX_SKB_FRAGS)
392                                 md->sg_start = 0;
393                         sg_init_table(sg, 1);
394 
395                         if (md->sg_start == md->sg_end)
396                                 break;
397                 }
398 
399                 if (apply && !apply_bytes)
400                         break;
401         }
402         return 0;
403 }
404 
405 static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
406 {
407         struct scatterlist *sg = md->sg_data + md->sg_start;
408 
409         if (md->sg_copy[md->sg_start]) {
410                 md->data = md->data_end = 0;
411         } else {
412                 md->data = sg_virt(sg);
413                 md->data_end = md->data + sg->length;
414         }
415 }
416 
417 static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
418 {
419         struct scatterlist *sg = md->sg_data;
420         int i = md->sg_start;
421 
422         do {
423                 int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
424 
425                 sk_mem_uncharge(sk, uncharge);
426                 bytes -= uncharge;
427                 if (!bytes)
428                         break;
429                 i++;
430                 if (i == MAX_SKB_FRAGS)
431                         i = 0;
432         } while (i != md->sg_end);
433 }
434 
435 static void free_bytes_sg(struct sock *sk, int bytes,
436                           struct sk_msg_buff *md, bool charge)
437 {
438         struct scatterlist *sg = md->sg_data;
439         int i = md->sg_start, free;
440 
441         while (bytes && sg[i].length) {
442                 free = sg[i].length;
443                 if (bytes < free) {
444                         sg[i].length -= bytes;
445                         sg[i].offset += bytes;
446                         if (charge)
447                                 sk_mem_uncharge(sk, bytes);
448                         break;
449                 }
450 
451                 if (charge)
452                         sk_mem_uncharge(sk, sg[i].length);
453                 put_page(sg_page(&sg[i]));
454                 bytes -= sg[i].length;
455                 sg[i].length = 0;
456                 sg[i].page_link = 0;
457                 sg[i].offset = 0;
458                 i++;
459 
460                 if (i == MAX_SKB_FRAGS)
461                         i = 0;
462         }
463         md->sg_start = i;
464 }
465 
466 static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
467 {
468         struct scatterlist *sg = md->sg_data;
469         int i = start, free = 0;
470 
471         while (sg[i].length) {
472                 free += sg[i].length;
473                 sk_mem_uncharge(sk, sg[i].length);
474                 if (!md->skb)
475                         put_page(sg_page(&sg[i]));
476                 sg[i].length = 0;
477                 sg[i].page_link = 0;
478                 sg[i].offset = 0;
479                 i++;
480 
481                 if (i == MAX_SKB_FRAGS)
482                         i = 0;
483         }
484         if (md->skb)
485                 consume_skb(md->skb);
486 
487         return free;
488 }
489 
490 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
491 {
492         int free = free_sg(sk, md->sg_start, md);
493 
494         md->sg_start = md->sg_end;
495         return free;
496 }
497 
498 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
499 {
500         return free_sg(sk, md->sg_curr, md);
501 }
502 
503 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
504 {
505         return ((_rc == SK_PASS) ?
506                (md->map ? __SK_REDIRECT : __SK_PASS) :
507                __SK_DROP);
508 }
509 
510 static unsigned int smap_do_tx_msg(struct sock *sk,
511                                    struct smap_psock *psock,
512                                    struct sk_msg_buff *md)
513 {
514         struct bpf_prog *prog;
515         unsigned int rc, _rc;
516 
517         preempt_disable();
518         rcu_read_lock();
519 
520         /* If the policy was removed mid-send then default to 'accept' */
521         prog = READ_ONCE(psock->bpf_tx_msg);
522         if (unlikely(!prog)) {
523                 _rc = SK_PASS;
524                 goto verdict;
525         }
526 
527         bpf_compute_data_pointers_sg(md);
528         rc = (*prog->bpf_func)(md, prog->insnsi);
529         psock->apply_bytes = md->apply_bytes;
530 
531         /* Moving return codes from UAPI namespace into internal namespace */
532         _rc = bpf_map_msg_verdict(rc, md);
533 
534         /* The psock has a refcount on the sock but not on the map and because
 535          * we need to drop the rcu read lock here, it's possible the map could be
536          * removed between here and when we need it to execute the sock
537          * redirect. So do the map lookup now for future use.
538          */
539         if (_rc == __SK_REDIRECT) {
540                 if (psock->sk_redir)
541                         sock_put(psock->sk_redir);
542                 psock->sk_redir = do_msg_redirect_map(md);
543                 if (!psock->sk_redir) {
544                         _rc = __SK_DROP;
545                         goto verdict;
546                 }
547                 sock_hold(psock->sk_redir);
548         }
549 verdict:
550         rcu_read_unlock();
551         preempt_enable();
552 
553         return _rc;
554 }
555 
556 static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
557                            struct smap_psock *psock,
558                            struct sk_msg_buff *md, int flags)
559 {
560         bool apply = apply_bytes;
561         size_t size, copied = 0;
562         struct sk_msg_buff *r;
563         int err = 0, i;
564 
565         r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
566         if (unlikely(!r))
567                 return -ENOMEM;
568 
569         lock_sock(sk);
570         r->sg_start = md->sg_start;
571         i = md->sg_start;
572 
573         do {
574                 size = (apply && apply_bytes < md->sg_data[i].length) ?
575                         apply_bytes : md->sg_data[i].length;
576 
577                 if (!sk_wmem_schedule(sk, size)) {
578                         if (!copied)
579                                 err = -ENOMEM;
580                         break;
581                 }
582 
583                 sk_mem_charge(sk, size);
584                 r->sg_data[i] = md->sg_data[i];
585                 r->sg_data[i].length = size;
586                 md->sg_data[i].length -= size;
587                 md->sg_data[i].offset += size;
588                 copied += size;
589 
590                 if (md->sg_data[i].length) {
591                         get_page(sg_page(&r->sg_data[i]));
592                         r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
593                 } else {
594                         i++;
595                         if (i == MAX_SKB_FRAGS)
596                                 i = 0;
597                         r->sg_end = i;
598                 }
599 
600                 if (apply) {
601                         apply_bytes -= size;
602                         if (!apply_bytes)
603                                 break;
604                 }
605         } while (i != md->sg_end);
606 
607         md->sg_start = i;
608 
609         if (!err) {
610                 list_add_tail(&r->list, &psock->ingress);
611                 sk->sk_data_ready(sk);
612         } else {
613                 free_start_sg(sk, r);
614                 kfree(r);
615         }
616 
617         release_sock(sk);
618         return err;
619 }
620 
621 static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
622                                        struct sk_msg_buff *md,
623                                        int flags)
624 {
625         bool ingress = !!(md->flags & BPF_F_INGRESS);
626         struct smap_psock *psock;
627         struct scatterlist *sg;
628         int err = 0;
629 
630         sg = md->sg_data;
631 
632         rcu_read_lock();
633         psock = smap_psock_sk(sk);
634         if (unlikely(!psock))
635                 goto out_rcu;
636 
637         if (!refcount_inc_not_zero(&psock->refcnt))
638                 goto out_rcu;
639 
640         rcu_read_unlock();
641 
642         if (ingress) {
643                 err = bpf_tcp_ingress(sk, send, psock, md, flags);
644         } else {
645                 lock_sock(sk);
646                 err = bpf_tcp_push(sk, send, md, flags, false);
647                 release_sock(sk);
648         }
649         smap_release_sock(psock, sk);
650         if (unlikely(err))
651                 goto out;
652         return 0;
653 out_rcu:
654         rcu_read_unlock();
655 out:
656         free_bytes_sg(NULL, send, md, false);
657         return err;
658 }
659 
660 static inline void bpf_md_init(struct smap_psock *psock)
661 {
662         if (!psock->apply_bytes) {
663                 psock->eval =  __SK_NONE;
664                 if (psock->sk_redir) {
665                         sock_put(psock->sk_redir);
666                         psock->sk_redir = NULL;
667                 }
668         }
669 }
670 
671 static void apply_bytes_dec(struct smap_psock *psock, int i)
672 {
673         if (psock->apply_bytes) {
674                 if (psock->apply_bytes < i)
675                         psock->apply_bytes = 0;
676                 else
677                         psock->apply_bytes -= i;
678         }
679 }
680 
681 static int bpf_exec_tx_verdict(struct smap_psock *psock,
682                                struct sk_msg_buff *m,
683                                struct sock *sk,
684                                int *copied, int flags)
685 {
686         bool cork = false, enospc = (m->sg_start == m->sg_end);
687         struct sock *redir;
688         int err = 0;
689         int send;
690 
691 more_data:
692         if (psock->eval == __SK_NONE)
693                 psock->eval = smap_do_tx_msg(sk, psock, m);
694 
695         if (m->cork_bytes &&
696             m->cork_bytes > psock->sg_size && !enospc) {
697                 psock->cork_bytes = m->cork_bytes - psock->sg_size;
698                 if (!psock->cork) {
699                         psock->cork = kcalloc(1,
700                                         sizeof(struct sk_msg_buff),
701                                         GFP_ATOMIC | __GFP_NOWARN);
702 
703                         if (!psock->cork) {
704                                 err = -ENOMEM;
705                                 goto out_err;
706                         }
707                 }
708                 memcpy(psock->cork, m, sizeof(*m));
709                 goto out_err;
710         }
711 
712         send = psock->sg_size;
713         if (psock->apply_bytes && psock->apply_bytes < send)
714                 send = psock->apply_bytes;
715 
716         switch (psock->eval) {
717         case __SK_PASS:
718                 err = bpf_tcp_push(sk, send, m, flags, true);
719                 if (unlikely(err)) {
720                         *copied -= free_start_sg(sk, m);
721                         break;
722                 }
723 
724                 apply_bytes_dec(psock, send);
725                 psock->sg_size -= send;
726                 break;
727         case __SK_REDIRECT:
728                 redir = psock->sk_redir;
729                 apply_bytes_dec(psock, send);
730 
731                 if (psock->cork) {
732                         cork = true;
733                         psock->cork = NULL;
734                 }
735 
736                 return_mem_sg(sk, send, m);
737                 release_sock(sk);
738 
739                 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
740                 lock_sock(sk);
741 
742                 if (unlikely(err < 0)) {
743                         free_start_sg(sk, m);
744                         psock->sg_size = 0;
745                         if (!cork)
746                                 *copied -= send;
747                 } else {
748                         psock->sg_size -= send;
749                 }
750 
751                 if (cork) {
752                         free_start_sg(sk, m);
753                         psock->sg_size = 0;
754                         kfree(m);
755                         m = NULL;
756                         err = 0;
757                 }
758                 break;
759         case __SK_DROP:
760         default:
761                 free_bytes_sg(sk, send, m, true);
762                 apply_bytes_dec(psock, send);
763                 *copied -= send;
764                 psock->sg_size -= send;
765                 err = -EACCES;
766                 break;
767         }
768 
769         if (likely(!err)) {
770                 bpf_md_init(psock);
771                 if (m &&
772                     m->sg_data[m->sg_start].page_link &&
773                     m->sg_data[m->sg_start].length)
774                         goto more_data;
775         }
776 
777 out_err:
778         return err;
779 }
780 
781 static int bpf_wait_data(struct sock *sk,
782                          struct smap_psock *psk, int flags,
783                          long timeo, int *err)
784 {
785         int rc;
786 
787         DEFINE_WAIT_FUNC(wait, woken_wake_function);
788 
789         add_wait_queue(sk_sleep(sk), &wait);
790         sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
791         rc = sk_wait_event(sk, &timeo,
792                            !list_empty(&psk->ingress) ||
793                            !skb_queue_empty(&sk->sk_receive_queue),
794                            &wait);
795         sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
796         remove_wait_queue(sk_sleep(sk), &wait);
797 
798         return rc;
799 }
800 
801 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
802                            int nonblock, int flags, int *addr_len)
803 {
804         struct iov_iter *iter = &msg->msg_iter;
805         struct smap_psock *psock;
806         int copied = 0;
807 
808         if (unlikely(flags & MSG_ERRQUEUE))
809                 return inet_recv_error(sk, msg, len, addr_len);
810 
811         rcu_read_lock();
812         psock = smap_psock_sk(sk);
813         if (unlikely(!psock))
814                 goto out;
815 
816         if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
817                 goto out;
818         rcu_read_unlock();
819 
820         if (!skb_queue_empty(&sk->sk_receive_queue))
821                 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
822 
823         lock_sock(sk);
824 bytes_ready:
825         while (copied != len) {
826                 struct scatterlist *sg;
827                 struct sk_msg_buff *md;
828                 int i;
829 
830                 md = list_first_entry_or_null(&psock->ingress,
831                                               struct sk_msg_buff, list);
832                 if (unlikely(!md))
833                         break;
834                 i = md->sg_start;
835                 do {
836                         struct page *page;
837                         int n, copy;
838 
839                         sg = &md->sg_data[i];
840                         copy = sg->length;
841                         page = sg_page(sg);
842 
843                         if (copied + copy > len)
844                                 copy = len - copied;
845 
846                         n = copy_page_to_iter(page, sg->offset, copy, iter);
847                         if (n != copy) {
848                                 md->sg_start = i;
849                                 release_sock(sk);
850                                 smap_release_sock(psock, sk);
851                                 return -EFAULT;
852                         }
853 
854                         copied += copy;
855                         sg->offset += copy;
856                         sg->length -= copy;
857                         sk_mem_uncharge(sk, copy);
858 
859                         if (!sg->length) {
860                                 i++;
861                                 if (i == MAX_SKB_FRAGS)
862                                         i = 0;
863                                 if (!md->skb)
864                                         put_page(page);
865                         }
866                         if (copied == len)
867                                 break;
868                 } while (i != md->sg_end);
869                 md->sg_start = i;
870 
871                 if (!sg->length && md->sg_start == md->sg_end) {
872                         list_del(&md->list);
873                         if (md->skb)
874                                 consume_skb(md->skb);
875                         kfree(md);
876                 }
877         }
878 
879         if (!copied) {
880                 long timeo;
881                 int data;
882                 int err = 0;
883 
884                 timeo = sock_rcvtimeo(sk, nonblock);
885                 data = bpf_wait_data(sk, psock, flags, timeo, &err);
886 
887                 if (data) {
888                         if (!skb_queue_empty(&sk->sk_receive_queue)) {
889                                 release_sock(sk);
890                                 smap_release_sock(psock, sk);
891                                 copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
892                                 return copied;
893                         }
894                         goto bytes_ready;
895                 }
896 
897                 if (err)
898                         copied = err;
899         }
900 
901         release_sock(sk);
902         smap_release_sock(psock, sk);
903         return copied;
904 out:
905         rcu_read_unlock();
906         return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
907 }
908 
909 
910 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
911 {
912         int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
913         struct sk_msg_buff md = {0};
914         unsigned int sg_copy = 0;
915         struct smap_psock *psock;
916         int copied = 0, err = 0;
917         struct scatterlist *sg;
918         long timeo;
919 
 920         /* It's possible a sock event or user removed the psock _but_ the ops
 921          * have not been reprogrammed yet, so we get here. In this case fall back
 922          * to tcp_sendmsg. Note this only works because we _only_ ever allow
 923          * a single ULP; there is no hierarchy here.
924          */
925         rcu_read_lock();
926         psock = smap_psock_sk(sk);
927         if (unlikely(!psock)) {
928                 rcu_read_unlock();
929                 return tcp_sendmsg(sk, msg, size);
930         }
931 
 932         /* Increment the psock refcnt to ensure it's not released while sending a
 933          * message. Required because sk lookup and bpf programs are used in
 934          * separate rcu critical sections. It's OK if we lose the map entry
935          * but we can't lose the sock reference.
936          */
937         if (!refcount_inc_not_zero(&psock->refcnt)) {
938                 rcu_read_unlock();
939                 return tcp_sendmsg(sk, msg, size);
940         }
941 
942         sg = md.sg_data;
943         sg_init_marker(sg, MAX_SKB_FRAGS);
944         rcu_read_unlock();
945 
946         lock_sock(sk);
947         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
948 
949         while (msg_data_left(msg)) {
950                 struct sk_msg_buff *m = NULL;
951                 bool enospc = false;
952                 int copy;
953 
954                 if (sk->sk_err) {
955                         err = -sk->sk_err;
956                         goto out_err;
957                 }
958 
959                 copy = msg_data_left(msg);
960                 if (!sk_stream_memory_free(sk))
961                         goto wait_for_sndbuf;
962 
963                 m = psock->cork_bytes ? psock->cork : &md;
964                 m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
965                 err = sk_alloc_sg(sk, copy, m->sg_data,
966                                   m->sg_start, &m->sg_end, &sg_copy,
967                                   m->sg_end - 1);
968                 if (err) {
969                         if (err != -ENOSPC)
970                                 goto wait_for_memory;
971                         enospc = true;
972                         copy = sg_copy;
973                 }
974 
975                 err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
976                 if (err < 0) {
977                         free_curr_sg(sk, m);
978                         goto out_err;
979                 }
980 
981                 psock->sg_size += copy;
982                 copied += copy;
983                 sg_copy = 0;
984 
 985                 /* When bytes are being corked skip running the BPF program and
 986                  * applying the verdict unless there is no more buffer space. In
 987                  * the ENOSPC case simply run the BPF program with the currently
 988                  * accumulated data. We don't have much choice at this point;
 989                  * we could try extending the page frags or chaining complex
 990                  * frags, but even in these cases _eventually_ we will hit an
 991                  * OOM scenario. More complex recovery schemes may be
 992                  * implemented in the future, but BPF programs must handle
 993                  * the case where apply_cork requests are not honored. The
 994                  * canonical method to verify this is to check the data length.
995                  */
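                /* Illustrative sketch (not part of this file): an SK_MSG
                 * program that requests corking and then checks the data
                 * length to see whether the cork request was honored, as
                 * described above. The 1000-byte threshold is an arbitrary
                 * example value:
                 *
                 *	SEC("sk_msg")
                 *	int bpf_prog_tx(struct sk_msg_md *msg)
                 *	{
                 *		void *data_end = (void *)(long)msg->data_end;
                 *		void *data = (void *)(long)msg->data;
                 *
                 *		if (data_end - data < 1000)
                 *			bpf_msg_cork_bytes(msg, 1000);
                 *		return SK_PASS;
                 *	}
                 */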
996                 if (psock->cork_bytes) {
997                         if (copy > psock->cork_bytes)
998                                 psock->cork_bytes = 0;
999                         else
1000                                 psock->cork_bytes -= copy;
1001 
1002                         if (psock->cork_bytes && !enospc)
1003                                 goto out_cork;
1004 
1005                         /* All cork bytes accounted for, re-run the filter */
1006                         psock->eval = __SK_NONE;
1007                         psock->cork_bytes = 0;
1008                 }
1009 
1010                 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1011                 if (unlikely(err < 0))
1012                         goto out_err;
1013                 continue;
1014 wait_for_sndbuf:
1015                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1016 wait_for_memory:
1017                 err = sk_stream_wait_memory(sk, &timeo);
1018                 if (err) {
1019                         if (m && m != psock->cork)
1020                                 free_start_sg(sk, m);
1021                         goto out_err;
1022                 }
1023         }
1024 out_err:
1025         if (err < 0)
1026                 err = sk_stream_error(sk, msg->msg_flags, err);
1027 out_cork:
1028         release_sock(sk);
1029         smap_release_sock(psock, sk);
1030         return copied ? copied : err;
1031 }
1032 
1033 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
1034                             int offset, size_t size, int flags)
1035 {
1036         struct sk_msg_buff md = {0}, *m = NULL;
1037         int err = 0, copied = 0;
1038         struct smap_psock *psock;
1039         struct scatterlist *sg;
1040         bool enospc = false;
1041 
1042         rcu_read_lock();
1043         psock = smap_psock_sk(sk);
1044         if (unlikely(!psock))
1045                 goto accept;
1046 
1047         if (!refcount_inc_not_zero(&psock->refcnt))
1048                 goto accept;
1049         rcu_read_unlock();
1050 
1051         lock_sock(sk);
1052 
1053         if (psock->cork_bytes) {
1054                 m = psock->cork;
1055                 sg = &m->sg_data[m->sg_end];
1056         } else {
1057                 m = &md;
1058                 sg = m->sg_data;
1059                 sg_init_marker(sg, MAX_SKB_FRAGS);
1060         }
1061 
1062         /* Catch case where ring is full and sendpage is stalled. */
1063         if (unlikely(m->sg_end == m->sg_start &&
1064             m->sg_data[m->sg_end].length))
1065                 goto out_err;
1066 
1067         psock->sg_size += size;
1068         sg_set_page(sg, page, size, offset);
1069         get_page(page);
1070         m->sg_copy[m->sg_end] = true;
1071         sk_mem_charge(sk, size);
1072         m->sg_end++;
1073         copied = size;
1074 
1075         if (m->sg_end == MAX_SKB_FRAGS)
1076                 m->sg_end = 0;
1077 
1078         if (m->sg_end == m->sg_start)
1079                 enospc = true;
1080 
1081         if (psock->cork_bytes) {
1082                 if (size > psock->cork_bytes)
1083                         psock->cork_bytes = 0;
1084                 else
1085                         psock->cork_bytes -= size;
1086 
1087                 if (psock->cork_bytes && !enospc)
1088                         goto out_err;
1089 
1090                 /* All cork bytes accounted for, re-run the filter */
1091                 psock->eval = __SK_NONE;
1092                 psock->cork_bytes = 0;
1093         }
1094 
1095         err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1096 out_err:
1097         release_sock(sk);
1098         smap_release_sock(psock, sk);
1099         return copied ? copied : err;
1100 accept:
1101         rcu_read_unlock();
1102         return tcp_sendpage(sk, page, offset, size, flags);
1103 }
1104 
1105 static void bpf_tcp_msg_add(struct smap_psock *psock,
1106                             struct sock *sk,
1107                             struct bpf_prog *tx_msg)
1108 {
1109         struct bpf_prog *orig_tx_msg;
1110 
1111         orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
1112         if (orig_tx_msg)
1113                 bpf_prog_put(orig_tx_msg);
1114 }
1115 
1116 static int bpf_tcp_ulp_register(void)
1117 {
1118         build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
1119         /* Once BPF TX ULP is registered it is never unregistered. It
1120          * will be in the ULP list for the lifetime of the system. Doing
1121          * duplicate registrations is not a problem.
1122          */
1123         return tcp_register_ulp(&bpf_tcp_ulp_ops);
1124 }
1125 
1126 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
1127 {
1128         struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
1129         int rc;
1130 
1131         if (unlikely(!prog))
1132                 return __SK_DROP;
1133 
1134         skb_orphan(skb);
1135         /* We need to ensure that BPF metadata for maps is also cleared
1136          * when we orphan the skb so that we don't have the possibility
1137          * to reference a stale map.
1138          */
1139         TCP_SKB_CB(skb)->bpf.map = NULL;
1140         skb->sk = psock->sock;
1141         bpf_compute_data_pointers(skb);
1142         preempt_disable();
1143         rc = (*prog->bpf_func)(skb, prog->insnsi);
1144         preempt_enable();
1145         skb->sk = NULL;
1146 
1147         /* Moving return codes from UAPI namespace into internal namespace */
1148         return rc == SK_PASS ?
1149                 (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
1150                 __SK_DROP;
1151 }
1152 
1153 static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
1154 {
1155         struct sock *sk = psock->sock;
1156         int copied = 0, num_sg;
1157         struct sk_msg_buff *r;
1158 
1159         r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
1160         if (unlikely(!r))
1161                 return -EAGAIN;
1162 
1163         if (!sk_rmem_schedule(sk, skb, skb->len)) {
1164                 kfree(r);
1165                 return -EAGAIN;
1166         }
1167 
1168         sg_init_table(r->sg_data, MAX_SKB_FRAGS);
1169         num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
1170         if (unlikely(num_sg < 0)) {
1171                 kfree(r);
1172                 return num_sg;
1173         }
1174         sk_mem_charge(sk, skb->len);
1175         copied = skb->len;
1176         r->sg_start = 0;
1177         r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
1178         r->skb = skb;
1179         list_add_tail(&r->list, &psock->ingress);
1180         sk->sk_data_ready(sk);
1181         return copied;
1182 }
1183 
1184 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
1185 {
1186         struct smap_psock *peer;
1187         struct sock *sk;
1188         __u32 in;
1189         int rc;
1190 
1191         rc = smap_verdict_func(psock, skb);
1192         switch (rc) {
1193         case __SK_REDIRECT:
1194                 sk = do_sk_redirect_map(skb);
1195                 if (!sk) {
1196                         kfree_skb(skb);
1197                         break;
1198                 }
1199 
1200                 peer = smap_psock_sk(sk);
1201                 in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1202 
1203                 if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
1204                              !test_bit(SMAP_TX_RUNNING, &peer->state))) {
1205                         kfree_skb(skb);
1206                         break;
1207                 }
1208 
1209                 if (!in && sock_writeable(sk)) {
1210                         skb_set_owner_w(skb, sk);
1211                         skb_queue_tail(&peer->rxqueue, skb);
1212                         schedule_work(&peer->tx_work);
1213                         break;
1214                 } else if (in &&
1215                            atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
1216                         skb_queue_tail(&peer->rxqueue, skb);
1217                         schedule_work(&peer->tx_work);
1218                         break;
1219                 }
1220         /* Fall through and free skb otherwise */
1221         case __SK_DROP:
1222         default:
1223                 kfree_skb(skb);
1224         }
1225 }
1226 
1227 static void smap_report_sk_error(struct smap_psock *psock, int err)
1228 {
1229         struct sock *sk = psock->sock;
1230 
1231         sk->sk_err = err;
1232         sk->sk_error_report(sk);
1233 }
1234 
1235 static void smap_read_sock_strparser(struct strparser *strp,
1236                                      struct sk_buff *skb)
1237 {
1238         struct smap_psock *psock;
1239 
1240         rcu_read_lock();
1241         psock = container_of(strp, struct smap_psock, strp);
1242         smap_do_verdict(psock, skb);
1243         rcu_read_unlock();
1244 }
1245 
1246 /* Called with lock held on socket */
1247 static void smap_data_ready(struct sock *sk)
1248 {
1249         struct smap_psock *psock;
1250 
1251         rcu_read_lock();
1252         psock = smap_psock_sk(sk);
1253         if (likely(psock)) {
1254                 write_lock_bh(&sk->sk_callback_lock);
1255                 strp_data_ready(&psock->strp);
1256                 write_unlock_bh(&sk->sk_callback_lock);
1257         }
1258         rcu_read_unlock();
1259 }
1260 
1261 static void smap_tx_work(struct work_struct *w)
1262 {
1263         struct smap_psock *psock;
1264         struct sk_buff *skb;
1265         int rem, off, n;
1266 
1267         psock = container_of(w, struct smap_psock, tx_work);
1268 
1269         /* lock sock to avoid losing sk_socket at some point during the loop */
1270         lock_sock(psock->sock);
1271         if (psock->save_skb) {
1272                 skb = psock->save_skb;
1273                 rem = psock->save_rem;
1274                 off = psock->save_off;
1275                 psock->save_skb = NULL;
1276                 goto start;
1277         }
1278 
1279         while ((skb = skb_dequeue(&psock->rxqueue))) {
1280                 __u32 flags;
1281 
1282                 rem = skb->len;
1283                 off = 0;
1284 start:
1285                 flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1286                 do {
1287                         if (likely(psock->sock->sk_socket)) {
1288                                 if (flags)
1289                                         n = smap_do_ingress(psock, skb);
1290                                 else
1291                                         n = skb_send_sock_locked(psock->sock,
1292                                                                  skb, off, rem);
1293                         } else {
1294                                 n = -EINVAL;
1295                         }
1296 
1297                         if (n <= 0) {
1298                                 if (n == -EAGAIN) {
1299                                         /* Retry when space is available */
1300                                         psock->save_skb = skb;
1301                                         psock->save_rem = rem;
1302                                         psock->save_off = off;
1303                                         goto out;
1304                                 }
1305                                 /* Hard errors break pipe and stop xmit */
1306                                 smap_report_sk_error(psock, n ? -n : EPIPE);
1307                                 clear_bit(SMAP_TX_RUNNING, &psock->state);
1308                                 kfree_skb(skb);
1309                                 goto out;
1310                         }
1311                         rem -= n;
1312                         off += n;
1313                 } while (rem);
1314 
1315                 if (!flags)
1316                         kfree_skb(skb);
1317         }
1318 out:
1319         release_sock(psock->sock);
1320 }
1321 
1322 static void smap_write_space(struct sock *sk)
1323 {
1324         struct smap_psock *psock;
1325 
1326         rcu_read_lock();
1327         psock = smap_psock_sk(sk);
1328         if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
1329                 schedule_work(&psock->tx_work);
1330         rcu_read_unlock();
1331 }
1332 
1333 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
1334 {
1335         if (!psock->strp_enabled)
1336                 return;
1337         sk->sk_data_ready = psock->save_data_ready;
1338         sk->sk_write_space = psock->save_write_space;
1339         psock->save_data_ready = NULL;
1340         psock->save_write_space = NULL;
1341         strp_stop(&psock->strp);
1342         psock->strp_enabled = false;
1343 }
1344 
1345 static void smap_destroy_psock(struct rcu_head *rcu)
1346 {
1347         struct smap_psock *psock = container_of(rcu,
1348                                                   struct smap_psock, rcu);
1349 
1350         /* Now that a grace period has passed there is no longer
1351          * any reference to this sock in the sockmap so we can
1352          * destroy the psock, strparser, and bpf programs. But,
1353          * because we use workqueue sync operations we cannot
1354          * do it in rcu context.
1355          */
1356         schedule_work(&psock->gc_work);
1357 }
1358 
1359 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
1360 {
1361         if (refcount_dec_and_test(&psock->refcnt)) {
1362                 tcp_cleanup_ulp(sock);
1363                 smap_stop_sock(psock, sock);
1364                 clear_bit(SMAP_TX_RUNNING, &psock->state);
1365                 rcu_assign_sk_user_data(sock, NULL);
1366                 call_rcu_sched(&psock->rcu, smap_destroy_psock);
1367         }
1368 }
1369 
1370 static int smap_parse_func_strparser(struct strparser *strp,
1371                                        struct sk_buff *skb)
1372 {
1373         struct smap_psock *psock;
1374         struct bpf_prog *prog;
1375         int rc;
1376 
1377         rcu_read_lock();
1378         psock = container_of(strp, struct smap_psock, strp);
1379         prog = READ_ONCE(psock->bpf_parse);
1380 
1381         if (unlikely(!prog)) {
1382                 rcu_read_unlock();
1383                 return skb->len;
1384         }
1385 
1386         /* Attach the socket for the bpf program to use if needed; we can do this
1387          * because strparser clones the skb before handing it to an upper
1388          * layer, meaning skb_orphan has been called. We NULL sk on the
1389          * way out to ensure we don't trigger a BUG_ON in skb/sk operations
1390          * later and because we are not charging the memory of this skb to
1391          * any socket yet.
1392          */
1393         skb->sk = psock->sock;
1394         bpf_compute_data_pointers(skb);
1395         rc = (*prog->bpf_func)(skb, prog->insnsi);
1396         skb->sk = NULL;
1397         rcu_read_unlock();
1398         return rc;
1399 }
1400 
1401 static int smap_read_sock_done(struct strparser *strp, int err)
1402 {
1403         return err;
1404 }
1405 
1406 static int smap_init_sock(struct smap_psock *psock,
1407                           struct sock *sk)
1408 {
1409         static const struct strp_callbacks cb = {
1410                 .rcv_msg = smap_read_sock_strparser,
1411                 .parse_msg = smap_parse_func_strparser,
1412                 .read_sock_done = smap_read_sock_done,
1413         };
1414 
1415         return strp_init(&psock->strp, sk, &cb);
1416 }
1417 
1418 static void smap_init_progs(struct smap_psock *psock,
1419                             struct bpf_stab *stab,
1420                             struct bpf_prog *verdict,
1421                             struct bpf_prog *parse)
1422 {
1423         struct bpf_prog *orig_parse, *orig_verdict;
1424 
1425         orig_parse = xchg(&psock->bpf_parse, parse);
1426         orig_verdict = xchg(&psock->bpf_verdict, verdict);
1427 
1428         if (orig_verdict)
1429                 bpf_prog_put(orig_verdict);
1430         if (orig_parse)
1431                 bpf_prog_put(orig_parse);
1432 }
1433 
1434 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
1435 {
1436         if (sk->sk_data_ready == smap_data_ready)
1437                 return;
1438         psock->save_data_ready = sk->sk_data_ready;
1439         psock->save_write_space = sk->sk_write_space;
1440         sk->sk_data_ready = smap_data_ready;
1441         sk->sk_write_space = smap_write_space;
1442         psock->strp_enabled = true;
1443 }
1444 
1445 static void sock_map_remove_complete(struct bpf_stab *stab)
1446 {
1447         bpf_map_area_free(stab->sock_map);
1448         kfree(stab);
1449 }
1450 
1451 static void smap_gc_work(struct work_struct *w)
1452 {
1453         struct smap_psock_map_entry *e, *tmp;
1454         struct sk_msg_buff *md, *mtmp;
1455         struct smap_psock *psock;
1456 
1457         psock = container_of(w, struct smap_psock, gc_work);
1458 
1459         /* no callback lock needed because we already detached sockmap ops */
1460         if (psock->strp_enabled)
1461                 strp_done(&psock->strp);
1462 
1463         cancel_work_sync(&psock->tx_work);
1464         __skb_queue_purge(&psock->rxqueue);
1465 
1466         /* At this point all strparser and xmit work must be complete */
1467         if (psock->bpf_parse)
1468                 bpf_prog_put(psock->bpf_parse);
1469         if (psock->bpf_verdict)
1470                 bpf_prog_put(psock->bpf_verdict);
1471         if (psock->bpf_tx_msg)
1472                 bpf_prog_put(psock->bpf_tx_msg);
1473 
1474         if (psock->cork) {
1475                 free_start_sg(psock->sock, psock->cork);
1476                 kfree(psock->cork);
1477         }
1478 
1479         list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
1480                 list_del(&md->list);
1481                 free_start_sg(psock->sock, md);
1482                 kfree(md);
1483         }
1484 
1485         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1486                 list_del(&e->list);
1487                 kfree(e);
1488         }
1489 
1490         if (psock->sk_redir)
1491                 sock_put(psock->sk_redir);
1492 
1493         sock_put(psock->sock);
1494         kfree(psock);
1495 }
1496 
1497 static struct smap_psock *smap_init_psock(struct sock *sock,
1498                                           struct bpf_stab *stab)
1499 {
1500         struct smap_psock *psock;
1501 
1502         psock = kzalloc_node(sizeof(struct smap_psock),
1503                              GFP_ATOMIC | __GFP_NOWARN,
1504                              stab->map.numa_node);
1505         if (!psock)
1506                 return ERR_PTR(-ENOMEM);
1507 
1508         psock->eval =  __SK_NONE;
1509         psock->sock = sock;
1510         skb_queue_head_init(&psock->rxqueue);
1511         INIT_WORK(&psock->tx_work, smap_tx_work);
1512         INIT_WORK(&psock->gc_work, smap_gc_work);
1513         INIT_LIST_HEAD(&psock->maps);
1514         INIT_LIST_HEAD(&psock->ingress);
1515         refcount_set(&psock->refcnt, 1);
1516 
1517         rcu_assign_sk_user_data(sock, psock);
1518         sock_hold(sock);
1519         return psock;
1520 }
1521 
1522 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1523 {
1524         struct bpf_stab *stab;
1525         u64 cost;
1526         int err;
1527 
1528         if (!capable(CAP_NET_ADMIN))
1529                 return ERR_PTR(-EPERM);
1530 
1531         /* check sanity of attributes */
1532         if (attr->max_entries == 0 || attr->key_size != 4 ||
1533             attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1534                 return ERR_PTR(-EINVAL);
1535 
1536         err = bpf_tcp_ulp_register();
1537         if (err && err != -EEXIST)
1538                 return ERR_PTR(err);
1539 
1540         stab = kzalloc(sizeof(*stab), GFP_USER);
1541         if (!stab)
1542                 return ERR_PTR(-ENOMEM);
1543 
1544         bpf_map_init_from_attr(&stab->map, attr);
1545 
1546         /* make sure page count doesn't overflow */
1547         cost = (u64) stab->map.max_entries * sizeof(struct sock *);
1548         err = -EINVAL;
1549         if (cost >= U32_MAX - PAGE_SIZE)
1550                 goto free_stab;
1551 
1552         stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1553 
1554         /* if map size is larger than memlock limit, reject it early */
1555         err = bpf_map_precharge_memlock(stab->map.pages);
1556         if (err)
1557                 goto free_stab;
1558 
1559         err = -ENOMEM;
1560         stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
1561                                             sizeof(struct sock *),
1562                                             stab->map.numa_node);
1563         if (!stab->sock_map)
1564                 goto free_stab;
1565 
1566         return &stab->map;
1567 free_stab:
1568         kfree(stab);
1569         return ERR_PTR(err);
1570 }
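/* Usage sketch (illustrative, not part of the original source): from user
 * space a sockmap is created like any other BPF map; key and value sizes
 * must both be 4 bytes, as checked above. Assuming the libbpf helper of
 * this era:
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *				    sizeof(int), sizeof(int),
 *				    1024, 0);
 *
 * Creation requires CAP_NET_ADMIN; the memlock cost charged is
 * max_entries * sizeof(struct sock *), rounded up to whole pages.
 */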
1571 
1572 static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
1573 {
1574         struct smap_psock_map_entry *e, *tmp;
1575 
1576         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1577                 if (e->entry == entry) {
1578                         list_del(&e->list);
1579                         break;
1580                 }
1581         }
1582 }
1583 
1584 static void sock_map_free(struct bpf_map *map)
1585 {
1586         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1587         int i;
1588 
1589         synchronize_rcu();
1590 
1591         /* At this point no update, lookup or delete operations can happen.
1592          * However, be aware we can still get socket state event updates
1593          * and data ready callbacks that reference the psock from sk_user_data.
1594          * Also psock worker threads are still in-flight. So smap_release_sock
1595          * will only free the psock after cancel_sync on the worker threads
1596          * and a grace period expires to ensure the psock is really safe to remove.
1597          */
1598         rcu_read_lock();
1599         for (i = 0; i < stab->map.max_entries; i++) {
1600                 struct smap_psock *psock;
1601                 struct sock *sock;
1602 
1603                 sock = xchg(&stab->sock_map[i], NULL);
1604                 if (!sock)
1605                         continue;
1606 
1607                 write_lock_bh(&sock->sk_callback_lock);
1608                 psock = smap_psock_sk(sock);
1609                 /* This check handles a racing sock event that can grab the
1610                  * sk_callback_lock before this point but after the xchg happens,
1611                  * causing the refcnt to hit zero and the sock user data (psock)
1612                  * to be NULL and queued for garbage collection.
1613                  */
1614                 if (likely(psock)) {
1615                         smap_list_remove(psock, &stab->sock_map[i]);
1616                         smap_release_sock(psock, sock);
1617                 }
1618                 write_unlock_bh(&sock->sk_callback_lock);
1619         }
1620         rcu_read_unlock();
1621 
1622         sock_map_remove_complete(stab);
1623 }
1624 
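/* Iteration helper backing the BPF_MAP_GET_NEXT_KEY syscall: a missing or
 * out-of-range key restarts iteration at index 0, and the last valid index
 * returns -ENOENT to signal the end of the map.
 */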
1625 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
1626 {
1627         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1628         u32 i = key ? *(u32 *)key : U32_MAX;
1629         u32 *next = (u32 *)next_key;
1630 
1631         if (i >= stab->map.max_entries) {
1632                 *next = 0;
1633                 return 0;
1634         }
1635 
1636         if (i == stab->map.max_entries - 1)
1637                 return -ENOENT;
1638 
1639         *next = i + 1;
1640         return 0;
1641 }
1642 
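/* BPF-side lookup used by the socket redirect helpers; returns the sock
 * stored at @key or NULL. Callers must be inside an RCU read-side critical
 * section, per the locking notes below.
 */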
1643 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
1644 {
1645         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1646 
1647         if (key >= map->max_entries)
1648                 return NULL;
1649 
1650         return READ_ONCE(stab->sock_map[key]);
1651 }
1652 
1653 static int sock_map_delete_elem(struct bpf_map *map, void *key)
1654 {
1655         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1656         struct smap_psock *psock;
1657         int k = *(u32 *)key;
1658         struct sock *sock;
1659 
1660         if (k >= map->max_entries)
1661                 return -EINVAL;
1662 
1663         sock = xchg(&stab->sock_map[k], NULL);
1664         if (!sock)
1665                 return -EINVAL;
1666 
1667         write_lock_bh(&sock->sk_callback_lock);
1668         psock = smap_psock_sk(sock);
1669         if (!psock)
1670                 goto out;
1671 
1672         if (psock->bpf_parse)
1673                 smap_stop_sock(psock, sock);
1674         smap_list_remove(psock, &stab->sock_map[k]);
1675         smap_release_sock(psock, sock);
1676 out:
1677         write_unlock_bh(&sock->sk_callback_lock);
1678         return 0;
1679 }
1680 
1681 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
1682  * done inside rcu critical sections. This ensures on updates that the psock
1683  * will not be released via smap_release_sock() until concurrent updates/deletes
1684  * complete. All operations on sock_map use cmpxchg and xchg to ensure
1685  * we do not get stale references. Any reads into the
1686  * map must be done with READ_ONCE() because of this.
1687  *
1688  * A psock is destroyed via call_rcu and after any worker threads are cancelled
1689  * and synced, so we are certain all references from the update/lookup/delete
1690  * operations as well as references in the data path are no longer in use.
1691  *
1692  * Psocks may exist in multiple maps, but only a single set of parse/verdict
1693  * programs may be inherited from the maps it belongs to. A reference count
1694  * is kept with the total number of references to the psock from all maps. The
1695  * psock will not be released until this reaches zero. The psock and sock
1696  * user data use the sk_callback_lock to protect critical data structures
1697  * from concurrent access. This prevents two updates from modifying the
1698  * user data in the sock at the same time; the lock is required anyway for
1699  * modifying the callbacks, we simply increase its scope slightly.
1700  *
1701  * Rules to follow:
1702  *  - psock must always be read inside RCU critical section
1703  *  - sk_user_data must only be modified inside sk_callback_lock and read
1704  *    inside RCU critical section.
1705  *  - psock->maps list must only be read & modified inside sk_callback_lock
1706  *  - sock_map must use READ_ONCE and (cmp)xchg operations
1707  *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
1708  */
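/* A minimal sketch of the first rule above (editor's illustration, not part
 * of the original file): any reader of the psock does roughly
 *
 *	rcu_read_lock();
 *	psock = smap_psock_sk(sock);	(an rcu_dereference of sk_user_data)
 *	if (psock)
 *		... use psock ...
 *	rcu_read_unlock();
 *
 * while writers additionally take write_lock_bh(&sock->sk_callback_lock),
 * as sock_map_ctx_update_elem() below does.
 */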
1709 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1710                                     struct bpf_map *map,
1711                                     void *key, u64 flags)
1712 {
1713         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1714         struct smap_psock_map_entry *e = NULL;
1715         struct bpf_prog *verdict, *parse, *tx_msg;
1716         struct sock *osock, *sock;
1717         struct smap_psock *psock;
1718         u32 i = *(u32 *)key;
1719         bool new = false;
1720         int err;
1721 
1722         if (unlikely(flags > BPF_EXIST))
1723                 return -EINVAL;
1724 
1725         if (unlikely(i >= stab->map.max_entries))
1726                 return -E2BIG;
1727 
1728         sock = READ_ONCE(stab->sock_map[i]);
1729         if (flags == BPF_EXIST && !sock)
1730                 return -ENOENT;
1731         else if (flags == BPF_NOEXIST && sock)
1732                 return -EEXIST;
1733 
1734         sock = skops->sk;
1735 
1736         /* 1. If the sock map has BPF programs attached, they will be
1737          * inherited by the sock being added. If the sock is already attached
1738          * to BPF programs this results in an error.
1739          */
1740         verdict = READ_ONCE(stab->bpf_verdict);
1741         parse = READ_ONCE(stab->bpf_parse);
1742         tx_msg = READ_ONCE(stab->bpf_tx_msg);
1743 
1744         if (parse && verdict) {
1745                 /* bpf prog refcnt may be zero if a concurrent attach operation
1746                  * removes the program after the above READ_ONCE() but before
1747                  * we increment the refcnt. If this is the case abort with an
1748                  * error.
1749                  */
1750                 verdict = bpf_prog_inc_not_zero(verdict);
1751                 if (IS_ERR(verdict))
1752                         return PTR_ERR(verdict);
1753 
1754                 parse = bpf_prog_inc_not_zero(parse);
1755                 if (IS_ERR(parse)) {
1756                         bpf_prog_put(verdict);
1757                         return PTR_ERR(parse);
1758                 }
1759         }
1760 
1761         if (tx_msg) {
1762                 tx_msg = bpf_prog_inc_not_zero(tx_msg);
1763                 if (IS_ERR(tx_msg)) {
1764                         if (parse && verdict) {
1765                                 bpf_prog_put(parse);
1766                                 bpf_prog_put(verdict);
1767                         }
1768                         return PTR_ERR(tx_msg);
1769                 }
1770         }
1771 
1772         write_lock_bh(&sock->sk_callback_lock);
1773         psock = smap_psock_sk(sock);
1774 
1775         /* 2. Do not allow inheriting programs if a psock exists and has
1776          * already inherited programs. This would create confusion about
1777          * which parser/verdict program is running. If no psock exists,
1778          * create one. We are inside sk_callback_lock to ensure a concurrent
1779          * create doesn't update the user data.
1780          */
1781         if (psock) {
1782                 if (READ_ONCE(psock->bpf_parse) && parse) {
1783                         err = -EBUSY;
1784                         goto out_progs;
1785                 }
1786                 if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
1787                         err = -EBUSY;
1788                         goto out_progs;
1789                 }
1790                 if (!refcount_inc_not_zero(&psock->refcnt)) {
1791                         err = -EAGAIN;
1792                         goto out_progs;
1793                 }
1794         } else {
1795                 psock = smap_init_psock(sock, stab);
1796                 if (IS_ERR(psock)) {
1797                         err = PTR_ERR(psock);
1798                         goto out_progs;
1799                 }
1800 
1801                 set_bit(SMAP_TX_RUNNING, &psock->state);
1802                 new = true;
1803         }
1804 
1805         e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
1806         if (!e) {
1807                 err = -ENOMEM;
1808                 goto out_progs;
1809         }
1810         e->entry = &stab->sock_map[i];
1811 
1812         /* 3. At this point we have a reference to a valid psock that is
1813          * running. Attach any BPF programs needed.
1814          */
1815         if (tx_msg)
1816                 bpf_tcp_msg_add(psock, sock, tx_msg);
1817         if (new) {
1818                 err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
1819                 if (err)
1820                         goto out_free;
1821         }
1822 
1823         if (parse && verdict && !psock->strp_enabled) {
1824                 err = smap_init_sock(psock, sock);
1825                 if (err)
1826                         goto out_free;
1827                 smap_init_progs(psock, stab, verdict, parse);
1828                 smap_start_sock(psock, sock);
1829         }
1830 
1831         /* 4. Place psock in sockmap for use and stop any programs on
1832          * the old sock, assuming it's not the same sock we are replacing
1833          * it with. Because we can only have a single set of programs, if
1834          * old_sock has a strparser we can stop it.
1835          */
1836         list_add_tail(&e->list, &psock->maps);
1837         write_unlock_bh(&sock->sk_callback_lock);
1838 
1839         osock = xchg(&stab->sock_map[i], sock);
1840         if (osock) {
1841                 struct smap_psock *opsock = smap_psock_sk(osock);
1842 
1843                 write_lock_bh(&osock->sk_callback_lock);
1844                 smap_list_remove(opsock, &stab->sock_map[i]);
1845                 smap_release_sock(opsock, osock);
1846                 write_unlock_bh(&osock->sk_callback_lock);
1847         }
1848         return 0;
1849 out_free:
1850         smap_release_sock(psock, sock);
1851 out_progs:
1852         if (parse && verdict) {
1853                 bpf_prog_put(parse);
1854                 bpf_prog_put(verdict);
1855         }
1856         if (tx_msg)
1857                 bpf_prog_put(tx_msg);
1858         write_unlock_bh(&sock->sk_callback_lock);
1859         kfree(e);
1860         return err;
1861 }
1862 
1863 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
1864 {
1865         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1866         struct bpf_prog *orig;
1867 
1868         if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
1869                 return -EINVAL;
1870 
1871         switch (type) {
1872         case BPF_SK_MSG_VERDICT:
1873                 orig = xchg(&stab->bpf_tx_msg, prog);
1874                 break;
1875         case BPF_SK_SKB_STREAM_PARSER:
1876                 orig = xchg(&stab->bpf_parse, prog);
1877                 break;
1878         case BPF_SK_SKB_STREAM_VERDICT:
1879                 orig = xchg(&stab->bpf_verdict, prog);
1880                 break;
1881         default:
1882                 return -EOPNOTSUPP;
1883         }
1884 
1885         if (orig)
1886                 bpf_prog_put(orig);
1887 
1888         return 0;
1889 }
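/* Usage sketch (illustrative, not part of the original source): user space
 * reaches sock_map_prog() through BPF_PROG_ATTACH with the sockmap fd as the
 * attach target. With libbpf this would look roughly like:
 *
 *	bpf_prog_attach(parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *	bpf_prog_attach(msg_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
 *
 * Any previously attached program of the same type is replaced and its
 * reference dropped, as the xchg()/bpf_prog_put() pairs above show.
 */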
1890 
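/* Lookups from the syscall side are not supported for sockmaps; returning
 * NULL here makes BPF_MAP_LOOKUP_ELEM fail with -ENOENT. Sockets are only
 * looked up from BPF programs via __sock_map_lookup_elem() above.
 */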
1891 static void *sock_map_lookup(struct bpf_map *map, void *key)
1892 {
1893         return NULL;
1894 }
1895 
1896 static int sock_map_update_elem(struct bpf_map *map,
1897                                 void *key, void *value, u64 flags)
1898 {
1899         struct bpf_sock_ops_kern skops;
1900         u32 fd = *(u32 *)value;
1901         struct socket *socket;
1902         int err;
1903 
1904         socket = sockfd_lookup(fd, &err);
1905         if (!socket)
1906                 return err;
1907 
1908         skops.sk = socket->sk;
1909         if (!skops.sk) {
1910                 fput(socket->file);
1911                 return -EINVAL;
1912         }
1913 
1914         if (skops.sk->sk_type != SOCK_STREAM ||
1915             skops.sk->sk_protocol != IPPROTO_TCP) {
1916                 fput(socket->file);
1917                 return -EOPNOTSUPP;
1918         }
1919 
1920         err = sock_map_ctx_update_elem(&skops, map, key, flags);
1921         fput(socket->file);
1922         return err;
1923 }
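/* Usage sketch (illustrative, not part of the original source): user space
 * adds an established TCP socket to the map by passing its fd as the value:
 *
 *	int key = 0;
 *	bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 *
 * Any socket that is not SOCK_STREAM/IPPROTO_TCP is rejected with
 * -EOPNOTSUPP by the checks above.
 */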
1924 
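/* Called via map_release_uref once the last user reference to the map is
 * gone: detach and release any parser, verdict and msg programs that are
 * still attached to the map.
 */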
1925 static void sock_map_release(struct bpf_map *map)
1926 {
1927         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1928         struct bpf_prog *orig;
1929 
1930         orig = xchg(&stab->bpf_parse, NULL);
1931         if (orig)
1932                 bpf_prog_put(orig);
1933         orig = xchg(&stab->bpf_verdict, NULL);
1934         if (orig)
1935                 bpf_prog_put(orig);
1936 
1937         orig = xchg(&stab->bpf_tx_msg, NULL);
1938         if (orig)
1939                 bpf_prog_put(orig);
1940 }
1941 
1942 const struct bpf_map_ops sock_map_ops = {
1943         .map_alloc = sock_map_alloc,
1944         .map_free = sock_map_free,
1945         .map_lookup_elem = sock_map_lookup,
1946         .map_get_next_key = sock_map_get_next_key,
1947         .map_update_elem = sock_map_update_elem,
1948         .map_delete_elem = sock_map_delete_elem,
1949         .map_release_uref = sock_map_release,
1950 };
1951 
1952 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
1953            struct bpf_map *, map, void *, key, u64, flags)
1954 {
1955         WARN_ON_ONCE(!rcu_read_lock_held());
1956         return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
1957 }
1958 
1959 const struct bpf_func_proto bpf_sock_map_update_proto = {
1960         .func           = bpf_sock_map_update,
1961         .gpl_only       = false,
1962         .pkt_access     = true,
1963         .ret_type       = RET_INTEGER,
1964         .arg1_type      = ARG_PTR_TO_CTX,
1965         .arg2_type      = ARG_CONST_MAP_PTR,
1966         .arg3_type      = ARG_PTR_TO_MAP_KEY,
1967         .arg4_type      = ARG_ANYTHING,
1968 };
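/* Usage sketch (illustrative, not part of the original source): the helper
 * above is what a BPF_PROG_TYPE_SOCK_OPS program calls to add the current
 * socket to a sockmap, e.g. on a passive established callback. The map and
 * program names here are hypothetical:
 *
 *	SEC("sockops")
 *	int bpf_sockops(struct bpf_sock_ops *skops)
 *	{
 *		int key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *		return 0;
 *	}
 *
 * The WARN_ON_ONCE(!rcu_read_lock_held()) documents that the helper always
 * runs inside an RCU read-side critical section.
 */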
1969 
