
TOMOYO Linux Cross Reference
Linux/kernel/bpf/sockmap.c


  1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  2  *
  3  * This program is free software; you can redistribute it and/or
  4  * modify it under the terms of version 2 of the GNU General Public
  5  * License as published by the Free Software Foundation.
  6  *
  7  * This program is distributed in the hope that it will be useful, but
  8  * WITHOUT ANY WARRANTY; without even the implied warranty of
  9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 10  * General Public License for more details.
 11  */
 12 
 13 /* A BPF sock_map is used to store sock objects. This is primarily used
 14  * for doing socket redirect with BPF helper routines.
 15  *
 16  * A sock map may have BPF programs attached to it, currently a program
 17  * used to parse packets and a program to provide a verdict and redirect
 18  * decision on the packet are supported. Any programs attached to a sock
 19  * map are inherited by sock objects when they are added to the map. If
 20  * no BPF programs are attached, the sock object may only be used for sock
 21  * redirect.
 22  *
 23  * A sock object may be in multiple maps, but can only inherit a single
 24  * parse or verdict program. If adding a sock object to a map would result
 25  * in having multiple parsing programs, the update will return an EBUSY error.
 26  *
 27  * For reference, this program is similar to the devmap used in the XDP
 28  * context, so reviewing these together may be useful. For an example,
 29  * please review ./samples/bpf/sockmap/.
 30  */
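
/* An illustrative user-space sketch (not part of this file) of the flow
 * described above: create the map, attach the parser and verdict programs to
 * it, then add an established TCP socket. The libbpf wrappers and the fds
 * (parse_prog_fd, verdict_prog_fd, sock_fd) are assumed to be set up
 * elsewhere; see samples/bpf/sockmap/ for a complete version.
 *
 *	#include <bpf/bpf.h>
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *				    sizeof(int), sizeof(int), 20, 0);
 *
 *	bpf_prog_attach(parse_prog_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *
 *	int key = 0;
 *	bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 */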
 31 #include <linux/bpf.h>
 32 #include <net/sock.h>
 33 #include <linux/filter.h>
 34 #include <linux/errno.h>
 35 #include <linux/file.h>
 36 #include <linux/kernel.h>
 37 #include <linux/net.h>
 38 #include <linux/skbuff.h>
 39 #include <linux/workqueue.h>
 40 #include <linux/list.h>
 41 #include <linux/mm.h>
 42 #include <net/strparser.h>
 43 #include <net/tcp.h>
 44 #include <linux/ptr_ring.h>
 45 #include <net/inet_common.h>
 46 
 47 #define SOCK_CREATE_FLAG_MASK \
 48         (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 49 
 50 struct bpf_stab {
 51         struct bpf_map map;
 52         struct sock **sock_map;
 53         struct bpf_prog *bpf_tx_msg;
 54         struct bpf_prog *bpf_parse;
 55         struct bpf_prog *bpf_verdict;
 56 };
 57 
 58 enum smap_psock_state {
 59         SMAP_TX_RUNNING,
 60 };
 61 
 62 struct smap_psock_map_entry {
 63         struct list_head list;
 64         struct sock **entry;
 65 };
 66 
 67 struct smap_psock {
 68         struct rcu_head rcu;
 69         refcount_t refcnt;
 70 
 71         /* datapath variables */
 72         struct sk_buff_head rxqueue;
 73         bool strp_enabled;
 74 
 75         /* datapath error path cache across tx work invocations */
 76         int save_rem;
 77         int save_off;
 78         struct sk_buff *save_skb;
 79 
 80         /* datapath variables for tx_msg ULP */
 81         struct sock *sk_redir;
 82         int apply_bytes;
 83         int cork_bytes;
 84         int sg_size;
 85         int eval;
 86         struct sk_msg_buff *cork;
 87         struct list_head ingress;
 88 
 89         struct strparser strp;
 90         struct bpf_prog *bpf_tx_msg;
 91         struct bpf_prog *bpf_parse;
 92         struct bpf_prog *bpf_verdict;
 93         struct list_head maps;
 94 
 95         /* Back reference used when sock callbacks trigger sockmap operations */
 96         struct sock *sock;
 97         unsigned long state;
 98 
 99         struct work_struct tx_work;
100         struct work_struct gc_work;
101 
102         struct proto *sk_proto;
103         void (*save_close)(struct sock *sk, long timeout);
104         void (*save_data_ready)(struct sock *sk);
105         void (*save_write_space)(struct sock *sk);
106 };
107 
108 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
109 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
110                            int nonblock, int flags, int *addr_len);
111 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
112 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
113                             int offset, size_t size, int flags);
114 
115 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
116 {
117         return rcu_dereference_sk_user_data(sk);
118 }
119 
120 static bool bpf_tcp_stream_read(const struct sock *sk)
121 {
122         struct smap_psock *psock;
123         bool empty = true;
124 
125         rcu_read_lock();
126         psock = smap_psock_sk(sk);
127         if (unlikely(!psock))
128                 goto out;
129         empty = list_empty(&psock->ingress);
130 out:
131         rcu_read_unlock();
132         return !empty;
133 }
134 
135 static struct proto tcp_bpf_proto;
136 static int bpf_tcp_init(struct sock *sk)
137 {
138         struct smap_psock *psock;
139 
140         rcu_read_lock();
141         psock = smap_psock_sk(sk);
142         if (unlikely(!psock)) {
143                 rcu_read_unlock();
144                 return -EINVAL;
145         }
146 
147         if (unlikely(psock->sk_proto)) {
148                 rcu_read_unlock();
149                 return -EBUSY;
150         }
151 
152         psock->save_close = sk->sk_prot->close;
153         psock->sk_proto = sk->sk_prot;
154 
155         if (psock->bpf_tx_msg) {
156                 tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
157                 tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
158                 tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
159                 tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
160         }
161 
162         sk->sk_prot = &tcp_bpf_proto;
163         rcu_read_unlock();
164         return 0;
165 }
166 
167 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
168 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
169 
170 static void bpf_tcp_release(struct sock *sk)
171 {
172         struct smap_psock *psock;
173 
174         rcu_read_lock();
175         psock = smap_psock_sk(sk);
176         if (unlikely(!psock))
177                 goto out;
178 
179         if (psock->cork) {
180                 free_start_sg(psock->sock, psock->cork);
181                 kfree(psock->cork);
182                 psock->cork = NULL;
183         }
184 
185         if (psock->sk_proto) {
186                 sk->sk_prot = psock->sk_proto;
187                 psock->sk_proto = NULL;
188         }
189 out:
190         rcu_read_unlock();
191 }
192 
193 static void bpf_tcp_close(struct sock *sk, long timeout)
194 {
195         void (*close_fun)(struct sock *sk, long timeout);
196         struct smap_psock_map_entry *e, *tmp;
197         struct sk_msg_buff *md, *mtmp;
198         struct smap_psock *psock;
199         struct sock *osk;
200 
201         rcu_read_lock();
202         psock = smap_psock_sk(sk);
203         if (unlikely(!psock)) {
204                 rcu_read_unlock();
205                 return sk->sk_prot->close(sk, timeout);
206         }
207 
 208         /* The psock may be destroyed anytime after exiting the RCU critical
209          * section so by the time we use close_fun the psock may no longer
210          * be valid. However, bpf_tcp_close is called with the sock lock
211          * held so the close hook and sk are still valid.
212          */
213         close_fun = psock->save_close;
214 
215         write_lock_bh(&sk->sk_callback_lock);
216         if (psock->cork) {
217                 free_start_sg(psock->sock, psock->cork);
218                 kfree(psock->cork);
219                 psock->cork = NULL;
220         }
221 
222         list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
223                 list_del(&md->list);
224                 free_start_sg(psock->sock, md);
225                 kfree(md);
226         }
227 
228         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
229                 osk = cmpxchg(e->entry, sk, NULL);
230                 if (osk == sk) {
231                         list_del(&e->list);
232                         smap_release_sock(psock, sk);
233                 }
234         }
235         write_unlock_bh(&sk->sk_callback_lock);
236         rcu_read_unlock();
237         close_fun(sk, timeout);
238 }
239 
240 enum __sk_action {
241         __SK_DROP = 0,
242         __SK_PASS,
243         __SK_REDIRECT,
244         __SK_NONE,
245 };
246 
247 static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
248         .name           = "bpf_tcp",
249         .uid            = TCP_ULP_BPF,
250         .user_visible   = false,
251         .owner          = NULL,
252         .init           = bpf_tcp_init,
253         .release        = bpf_tcp_release,
254 };
255 
256 static int memcopy_from_iter(struct sock *sk,
257                              struct sk_msg_buff *md,
258                              struct iov_iter *from, int bytes)
259 {
260         struct scatterlist *sg = md->sg_data;
261         int i = md->sg_curr, rc = -ENOSPC;
262 
263         do {
264                 int copy;
265                 char *to;
266 
267                 if (md->sg_copybreak >= sg[i].length) {
268                         md->sg_copybreak = 0;
269 
270                         if (++i == MAX_SKB_FRAGS)
271                                 i = 0;
272 
273                         if (i == md->sg_end)
274                                 break;
275                 }
276 
277                 copy = sg[i].length - md->sg_copybreak;
278                 to = sg_virt(&sg[i]) + md->sg_copybreak;
279                 md->sg_copybreak += copy;
280 
281                 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
282                         rc = copy_from_iter_nocache(to, copy, from);
283                 else
284                         rc = copy_from_iter(to, copy, from);
285 
286                 if (rc != copy) {
287                         rc = -EFAULT;
288                         goto out;
289                 }
290 
291                 bytes -= copy;
292                 if (!bytes)
293                         break;
294 
295                 md->sg_copybreak = 0;
296                 if (++i == MAX_SKB_FRAGS)
297                         i = 0;
298         } while (i != md->sg_end);
299 out:
300         md->sg_curr = i;
301         return rc;
302 }
303 
304 static int bpf_tcp_push(struct sock *sk, int apply_bytes,
305                         struct sk_msg_buff *md,
306                         int flags, bool uncharge)
307 {
308         bool apply = apply_bytes;
309         struct scatterlist *sg;
310         int offset, ret = 0;
311         struct page *p;
312         size_t size;
313 
314         while (1) {
315                 sg = md->sg_data + md->sg_start;
316                 size = (apply && apply_bytes < sg->length) ?
317                         apply_bytes : sg->length;
318                 offset = sg->offset;
319 
320                 tcp_rate_check_app_limited(sk);
321                 p = sg_page(sg);
322 retry:
323                 ret = do_tcp_sendpages(sk, p, offset, size, flags);
324                 if (ret != size) {
325                         if (ret > 0) {
326                                 if (apply)
327                                         apply_bytes -= ret;
328                                 size -= ret;
329                                 offset += ret;
330                                 if (uncharge)
331                                         sk_mem_uncharge(sk, ret);
332                                 goto retry;
333                         }
334 
335                         sg->length = size;
336                         sg->offset = offset;
337                         return ret;
338                 }
339 
340                 if (apply)
341                         apply_bytes -= ret;
342                 sg->offset += ret;
343                 sg->length -= ret;
344                 if (uncharge)
345                         sk_mem_uncharge(sk, ret);
346 
347                 if (!sg->length) {
348                         put_page(p);
349                         md->sg_start++;
350                         if (md->sg_start == MAX_SKB_FRAGS)
351                                 md->sg_start = 0;
352                         sg_init_table(sg, 1);
353 
354                         if (md->sg_start == md->sg_end)
355                                 break;
356                 }
357 
358                 if (apply && !apply_bytes)
359                         break;
360         }
361         return 0;
362 }
363 
364 static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
365 {
366         struct scatterlist *sg = md->sg_data + md->sg_start;
367 
368         if (md->sg_copy[md->sg_start]) {
369                 md->data = md->data_end = 0;
370         } else {
371                 md->data = sg_virt(sg);
372                 md->data_end = md->data + sg->length;
373         }
374 }
375 
376 static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
377 {
378         struct scatterlist *sg = md->sg_data;
379         int i = md->sg_start;
380 
381         do {
382                 int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
383 
384                 sk_mem_uncharge(sk, uncharge);
385                 bytes -= uncharge;
386                 if (!bytes)
387                         break;
388                 i++;
389                 if (i == MAX_SKB_FRAGS)
390                         i = 0;
391         } while (i != md->sg_end);
392 }
393 
394 static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
395 {
396         struct scatterlist *sg = md->sg_data;
397         int i = md->sg_start, free;
398 
399         while (bytes && sg[i].length) {
400                 free = sg[i].length;
401                 if (bytes < free) {
402                         sg[i].length -= bytes;
403                         sg[i].offset += bytes;
404                         sk_mem_uncharge(sk, bytes);
405                         break;
406                 }
407 
408                 sk_mem_uncharge(sk, sg[i].length);
409                 put_page(sg_page(&sg[i]));
410                 bytes -= sg[i].length;
411                 sg[i].length = 0;
412                 sg[i].page_link = 0;
413                 sg[i].offset = 0;
414                 i++;
415 
416                 if (i == MAX_SKB_FRAGS)
417                         i = 0;
418         }
419 }
420 
421 static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
422 {
423         struct scatterlist *sg = md->sg_data;
424         int i = start, free = 0;
425 
426         while (sg[i].length) {
427                 free += sg[i].length;
428                 sk_mem_uncharge(sk, sg[i].length);
429                 put_page(sg_page(&sg[i]));
430                 sg[i].length = 0;
431                 sg[i].page_link = 0;
432                 sg[i].offset = 0;
433                 i++;
434 
435                 if (i == MAX_SKB_FRAGS)
436                         i = 0;
437         }
438 
439         return free;
440 }
441 
442 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
443 {
444         int free = free_sg(sk, md->sg_start, md);
445 
446         md->sg_start = md->sg_end;
447         return free;
448 }
449 
450 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
451 {
452         return free_sg(sk, md->sg_curr, md);
453 }
454 
455 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
456 {
457         return ((_rc == SK_PASS) ?
458                (md->map ? __SK_REDIRECT : __SK_PASS) :
459                __SK_DROP);
460 }
461 
462 static unsigned int smap_do_tx_msg(struct sock *sk,
463                                    struct smap_psock *psock,
464                                    struct sk_msg_buff *md)
465 {
466         struct bpf_prog *prog;
467         unsigned int rc, _rc;
468 
469         preempt_disable();
470         rcu_read_lock();
471 
472         /* If the policy was removed mid-send then default to 'accept' */
473         prog = READ_ONCE(psock->bpf_tx_msg);
474         if (unlikely(!prog)) {
475                 _rc = SK_PASS;
476                 goto verdict;
477         }
478 
479         bpf_compute_data_pointers_sg(md);
480         rc = (*prog->bpf_func)(md, prog->insnsi);
481         psock->apply_bytes = md->apply_bytes;
482 
483         /* Moving return codes from UAPI namespace into internal namespace */
484         _rc = bpf_map_msg_verdict(rc, md);
485 
 486         /* The psock has a refcount on the sock but not on the map, and because
 487          * we need to drop the rcu read lock here it's possible the map could be
 488          * removed between here and when we need it to execute the sock
 489          * redirect. So do the map lookup now for future use.
490          */
491         if (_rc == __SK_REDIRECT) {
492                 if (psock->sk_redir)
493                         sock_put(psock->sk_redir);
494                 psock->sk_redir = do_msg_redirect_map(md);
495                 if (!psock->sk_redir) {
496                         _rc = __SK_DROP;
497                         goto verdict;
498                 }
499                 sock_hold(psock->sk_redir);
500         }
501 verdict:
502         rcu_read_unlock();
503         preempt_enable();
504 
505         return _rc;
506 }
507 
508 static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
509                            struct smap_psock *psock,
510                            struct sk_msg_buff *md, int flags)
511 {
512         bool apply = apply_bytes;
513         size_t size, copied = 0;
514         struct sk_msg_buff *r;
515         int err = 0, i;
516 
517         r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
518         if (unlikely(!r))
519                 return -ENOMEM;
520 
521         lock_sock(sk);
522         r->sg_start = md->sg_start;
523         i = md->sg_start;
524 
525         do {
526                 r->sg_data[i] = md->sg_data[i];
527 
528                 size = (apply && apply_bytes < md->sg_data[i].length) ?
529                         apply_bytes : md->sg_data[i].length;
530 
531                 if (!sk_wmem_schedule(sk, size)) {
532                         if (!copied)
533                                 err = -ENOMEM;
534                         break;
535                 }
536 
537                 sk_mem_charge(sk, size);
538                 r->sg_data[i].length = size;
539                 md->sg_data[i].length -= size;
540                 md->sg_data[i].offset += size;
541                 copied += size;
542 
543                 if (md->sg_data[i].length) {
544                         get_page(sg_page(&r->sg_data[i]));
545                         r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
546                 } else {
547                         i++;
548                         if (i == MAX_SKB_FRAGS)
549                                 i = 0;
550                         r->sg_end = i;
551                 }
552 
553                 if (apply) {
554                         apply_bytes -= size;
555                         if (!apply_bytes)
556                                 break;
557                 }
558         } while (i != md->sg_end);
559 
560         md->sg_start = i;
561 
562         if (!err) {
563                 list_add_tail(&r->list, &psock->ingress);
564                 sk->sk_data_ready(sk);
565         } else {
566                 free_start_sg(sk, r);
567                 kfree(r);
568         }
569 
570         release_sock(sk);
571         return err;
572 }
573 
574 static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
575                                        struct sk_msg_buff *md,
576                                        int flags)
577 {
578         struct smap_psock *psock;
579         struct scatterlist *sg;
580         int i, err, free = 0;
581         bool ingress = !!(md->flags & BPF_F_INGRESS);
582 
583         sg = md->sg_data;
584 
585         rcu_read_lock();
586         psock = smap_psock_sk(sk);
587         if (unlikely(!psock))
588                 goto out_rcu;
589 
590         if (!refcount_inc_not_zero(&psock->refcnt))
591                 goto out_rcu;
592 
593         rcu_read_unlock();
594 
595         if (ingress) {
596                 err = bpf_tcp_ingress(sk, send, psock, md, flags);
597         } else {
598                 lock_sock(sk);
599                 err = bpf_tcp_push(sk, send, md, flags, false);
600                 release_sock(sk);
601         }
602         smap_release_sock(psock, sk);
603         if (unlikely(err))
604                 goto out;
605         return 0;
606 out_rcu:
607         rcu_read_unlock();
608 out:
609         i = md->sg_start;
610         while (sg[i].length) {
611                 free += sg[i].length;
612                 put_page(sg_page(&sg[i]));
613                 sg[i].length = 0;
614                 i++;
615                 if (i == MAX_SKB_FRAGS)
616                         i = 0;
617         }
618         return free;
619 }
620 
621 static inline void bpf_md_init(struct smap_psock *psock)
622 {
623         if (!psock->apply_bytes) {
624                 psock->eval =  __SK_NONE;
625                 if (psock->sk_redir) {
626                         sock_put(psock->sk_redir);
627                         psock->sk_redir = NULL;
628                 }
629         }
630 }
631 
632 static void apply_bytes_dec(struct smap_psock *psock, int i)
633 {
634         if (psock->apply_bytes) {
635                 if (psock->apply_bytes < i)
636                         psock->apply_bytes = 0;
637                 else
638                         psock->apply_bytes -= i;
639         }
640 }
641 
642 static int bpf_exec_tx_verdict(struct smap_psock *psock,
643                                struct sk_msg_buff *m,
644                                struct sock *sk,
645                                int *copied, int flags)
646 {
647         bool cork = false, enospc = (m->sg_start == m->sg_end);
648         struct sock *redir;
649         int err = 0;
650         int send;
651 
652 more_data:
653         if (psock->eval == __SK_NONE)
654                 psock->eval = smap_do_tx_msg(sk, psock, m);
655 
656         if (m->cork_bytes &&
657             m->cork_bytes > psock->sg_size && !enospc) {
658                 psock->cork_bytes = m->cork_bytes - psock->sg_size;
659                 if (!psock->cork) {
660                         psock->cork = kcalloc(1,
661                                         sizeof(struct sk_msg_buff),
662                                         GFP_ATOMIC | __GFP_NOWARN);
663 
664                         if (!psock->cork) {
665                                 err = -ENOMEM;
666                                 goto out_err;
667                         }
668                 }
669                 memcpy(psock->cork, m, sizeof(*m));
670                 goto out_err;
671         }
672 
673         send = psock->sg_size;
674         if (psock->apply_bytes && psock->apply_bytes < send)
675                 send = psock->apply_bytes;
676 
677         switch (psock->eval) {
678         case __SK_PASS:
679                 err = bpf_tcp_push(sk, send, m, flags, true);
680                 if (unlikely(err)) {
681                         *copied -= free_start_sg(sk, m);
682                         break;
683                 }
684 
685                 apply_bytes_dec(psock, send);
686                 psock->sg_size -= send;
687                 break;
688         case __SK_REDIRECT:
689                 redir = psock->sk_redir;
690                 apply_bytes_dec(psock, send);
691 
692                 if (psock->cork) {
693                         cork = true;
694                         psock->cork = NULL;
695                 }
696 
697                 return_mem_sg(sk, send, m);
698                 release_sock(sk);
699 
700                 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
701                 lock_sock(sk);
702 
703                 if (cork) {
704                         free_start_sg(sk, m);
705                         kfree(m);
706                         m = NULL;
707                 }
708                 if (unlikely(err))
709                         *copied -= err;
710                 else
711                         psock->sg_size -= send;
712                 break;
713         case __SK_DROP:
714         default:
715                 free_bytes_sg(sk, send, m);
716                 apply_bytes_dec(psock, send);
717                 *copied -= send;
718                 psock->sg_size -= send;
719                 err = -EACCES;
720                 break;
721         }
722 
723         if (likely(!err)) {
724                 bpf_md_init(psock);
725                 if (m &&
726                     m->sg_data[m->sg_start].page_link &&
727                     m->sg_data[m->sg_start].length)
728                         goto more_data;
729         }
730 
731 out_err:
732         return err;
733 }
734 
735 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
736                            int nonblock, int flags, int *addr_len)
737 {
738         struct iov_iter *iter = &msg->msg_iter;
739         struct smap_psock *psock;
740         int copied = 0;
741 
742         if (unlikely(flags & MSG_ERRQUEUE))
743                 return inet_recv_error(sk, msg, len, addr_len);
744 
745         rcu_read_lock();
746         psock = smap_psock_sk(sk);
747         if (unlikely(!psock))
748                 goto out;
749 
750         if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
751                 goto out;
752         rcu_read_unlock();
753 
754         if (!skb_queue_empty(&sk->sk_receive_queue))
755                 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
756 
757         lock_sock(sk);
758         while (copied != len) {
759                 struct scatterlist *sg;
760                 struct sk_msg_buff *md;
761                 int i;
762 
763                 md = list_first_entry_or_null(&psock->ingress,
764                                               struct sk_msg_buff, list);
765                 if (unlikely(!md))
766                         break;
767                 i = md->sg_start;
768                 do {
769                         struct page *page;
770                         int n, copy;
771 
772                         sg = &md->sg_data[i];
773                         copy = sg->length;
774                         page = sg_page(sg);
775 
776                         if (copied + copy > len)
777                                 copy = len - copied;
778 
779                         n = copy_page_to_iter(page, sg->offset, copy, iter);
780                         if (n != copy) {
781                                 md->sg_start = i;
782                                 release_sock(sk);
783                                 smap_release_sock(psock, sk);
784                                 return -EFAULT;
785                         }
786 
787                         copied += copy;
788                         sg->offset += copy;
789                         sg->length -= copy;
790                         sk_mem_uncharge(sk, copy);
791 
792                         if (!sg->length) {
793                                 i++;
794                                 if (i == MAX_SKB_FRAGS)
795                                         i = 0;
796                                 if (!md->skb)
797                                         put_page(page);
798                         }
799                         if (copied == len)
800                                 break;
801                 } while (i != md->sg_end);
802                 md->sg_start = i;
803 
804                 if (!sg->length && md->sg_start == md->sg_end) {
805                         list_del(&md->list);
806                         if (md->skb)
807                                 consume_skb(md->skb);
808                         kfree(md);
809                 }
810         }
811 
812         release_sock(sk);
813         smap_release_sock(psock, sk);
814         return copied;
815 out:
816         rcu_read_unlock();
817         return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
818 }
819 
820 
821 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
822 {
823         int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
824         struct sk_msg_buff md = {0};
825         unsigned int sg_copy = 0;
826         struct smap_psock *psock;
827         int copied = 0, err = 0;
828         struct scatterlist *sg;
829         long timeo;
830 
 831         /* It's possible a sock event or user removed the psock _but_ the ops
 832          * have not been reprogrammed yet, so we get here. In this case fall back
 833          * to tcp_sendmsg. Note this only works because we _only_ ever allow
 834          * a single ULP; there is no hierarchy here.
835          */
836         rcu_read_lock();
837         psock = smap_psock_sk(sk);
838         if (unlikely(!psock)) {
839                 rcu_read_unlock();
840                 return tcp_sendmsg(sk, msg, size);
841         }
842 
 843         /* Increment the psock refcnt to ensure it's not released while sending a
 844          * message. Required because sk lookup and bpf programs are used in
 845          * separate rcu critical sections. It's OK if we lose the map entry,
 846          * but we can't lose the sock reference.
847          */
848         if (!refcount_inc_not_zero(&psock->refcnt)) {
849                 rcu_read_unlock();
850                 return tcp_sendmsg(sk, msg, size);
851         }
852 
853         sg = md.sg_data;
854         sg_init_marker(sg, MAX_SKB_FRAGS);
855         rcu_read_unlock();
856 
857         lock_sock(sk);
858         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
859 
860         while (msg_data_left(msg)) {
861                 struct sk_msg_buff *m;
862                 bool enospc = false;
863                 int copy;
864 
865                 if (sk->sk_err) {
866                         err = sk->sk_err;
867                         goto out_err;
868                 }
869 
870                 copy = msg_data_left(msg);
871                 if (!sk_stream_memory_free(sk))
872                         goto wait_for_sndbuf;
873 
874                 m = psock->cork_bytes ? psock->cork : &md;
875                 m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
876                 err = sk_alloc_sg(sk, copy, m->sg_data,
877                                   m->sg_start, &m->sg_end, &sg_copy,
878                                   m->sg_end - 1);
879                 if (err) {
880                         if (err != -ENOSPC)
881                                 goto wait_for_memory;
882                         enospc = true;
883                         copy = sg_copy;
884                 }
885 
886                 err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
887                 if (err < 0) {
888                         free_curr_sg(sk, m);
889                         goto out_err;
890                 }
891 
892                 psock->sg_size += copy;
893                 copied += copy;
894                 sg_copy = 0;
895 
 896                 /* When bytes are being corked, skip running the BPF program
 897                  * and applying the verdict unless there is no more buffer
 898                  * space. In the ENOSPC case simply run the BPF program with
 899                  * the currently accumulated data. We don't have much choice
 900                  * at this point: we could try extending the page frags or
 901                  * chaining complex frags, but even in these cases _eventually_
 902                  * we will hit an OOM scenario. More complex recovery schemes
 903                  * may be implemented in the future, but BPF programs must
 904                  * handle the case where cork/apply requests are not honored.
 905                  * The canonical way to verify this is to check the data
 906                  * length (see the illustrative sketch after this function). */
907                 if (psock->cork_bytes) {
908                         if (copy > psock->cork_bytes)
909                                 psock->cork_bytes = 0;
910                         else
911                                 psock->cork_bytes -= copy;
912 
913                         if (psock->cork_bytes && !enospc)
914                                 goto out_cork;
915 
 916                         /* All cork bytes accounted for, re-run the filter */
917                         psock->eval = __SK_NONE;
918                         psock->cork_bytes = 0;
919                 }
920 
921                 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
922                 if (unlikely(err < 0))
923                         goto out_err;
924                 continue;
925 wait_for_sndbuf:
926                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
927 wait_for_memory:
928                 err = sk_stream_wait_memory(sk, &timeo);
929                 if (err)
930                         goto out_err;
931         }
932 out_err:
933         if (err < 0)
934                 err = sk_stream_error(sk, msg->msg_flags, err);
935 out_cork:
936         release_sock(sk);
937         smap_release_sock(psock, sk);
938         return copied ? copied : err;
939 }
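
/* An illustrative BPF-side sketch (not part of this file) of an SK_MSG
 * program exercising the cork/apply path handled above by bpf_exec_tx_verdict()
 * and bpf_tcp_sendmsg(). The section name, the map name sock_map and the
 * byte counts are assumptions made for the example.
 *
 *	SEC("sk_msg")
 *	int tx_msg_prog(struct sk_msg_md *msg)
 *	{
 *		// ask for 512 bytes to be corked before the verdict is taken,
 *		// then apply that verdict to at most 1024 bytes at a time
 *		bpf_msg_cork_bytes(msg, 512);
 *		bpf_msg_apply_bytes(msg, 1024);
 *		// redirect into the ingress queue of the socket in slot 0
 *		return bpf_msg_redirect_map(msg, &sock_map, 0, BPF_F_INGRESS);
 *	}
 */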
940 
941 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
942                             int offset, size_t size, int flags)
943 {
944         struct sk_msg_buff md = {0}, *m = NULL;
945         int err = 0, copied = 0;
946         struct smap_psock *psock;
947         struct scatterlist *sg;
948         bool enospc = false;
949 
950         rcu_read_lock();
951         psock = smap_psock_sk(sk);
952         if (unlikely(!psock))
953                 goto accept;
954 
955         if (!refcount_inc_not_zero(&psock->refcnt))
956                 goto accept;
957         rcu_read_unlock();
958 
959         lock_sock(sk);
960 
961         if (psock->cork_bytes) {
962                 m = psock->cork;
963                 sg = &m->sg_data[m->sg_end];
964         } else {
965                 m = &md;
966                 sg = m->sg_data;
967                 sg_init_marker(sg, MAX_SKB_FRAGS);
968         }
969 
970         /* Catch case where ring is full and sendpage is stalled. */
971         if (unlikely(m->sg_end == m->sg_start &&
972             m->sg_data[m->sg_end].length))
973                 goto out_err;
974 
975         psock->sg_size += size;
976         sg_set_page(sg, page, size, offset);
977         get_page(page);
978         m->sg_copy[m->sg_end] = true;
979         sk_mem_charge(sk, size);
980         m->sg_end++;
981         copied = size;
982 
983         if (m->sg_end == MAX_SKB_FRAGS)
984                 m->sg_end = 0;
985 
986         if (m->sg_end == m->sg_start)
987                 enospc = true;
988 
989         if (psock->cork_bytes) {
990                 if (size > psock->cork_bytes)
991                         psock->cork_bytes = 0;
992                 else
993                         psock->cork_bytes -= size;
994 
995                 if (psock->cork_bytes && !enospc)
996                         goto out_err;
997 
 998                 /* All cork bytes accounted for, re-run the filter */
999                 psock->eval = __SK_NONE;
1000                 psock->cork_bytes = 0;
1001         }
1002 
1003         err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1004 out_err:
1005         release_sock(sk);
1006         smap_release_sock(psock, sk);
1007         return copied ? copied : err;
1008 accept:
1009         rcu_read_unlock();
1010         return tcp_sendpage(sk, page, offset, size, flags);
1011 }
1012 
1013 static void bpf_tcp_msg_add(struct smap_psock *psock,
1014                             struct sock *sk,
1015                             struct bpf_prog *tx_msg)
1016 {
1017         struct bpf_prog *orig_tx_msg;
1018 
1019         orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
1020         if (orig_tx_msg)
1021                 bpf_prog_put(orig_tx_msg);
1022 }
1023 
1024 static int bpf_tcp_ulp_register(void)
1025 {
1026         tcp_bpf_proto = tcp_prot;
1027         tcp_bpf_proto.close = bpf_tcp_close;
1028         /* Once BPF TX ULP is registered it is never unregistered. It
1029          * will be in the ULP list for the lifetime of the system. Doing
1030          * duplicate registrations is not a problem.
1031          */
1032         return tcp_register_ulp(&bpf_tcp_ulp_ops);
1033 }
1034 
1035 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
1036 {
1037         struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
1038         int rc;
1039 
1040         if (unlikely(!prog))
1041                 return __SK_DROP;
1042 
1043         skb_orphan(skb);
1044         /* We need to ensure that BPF metadata for maps is also cleared
1045          * when we orphan the skb so that we don't end up referencing a
1046          * stale map.
1047          */
1048         TCP_SKB_CB(skb)->bpf.map = NULL;
1049         skb->sk = psock->sock;
1050         bpf_compute_data_pointers(skb);
1051         preempt_disable();
1052         rc = (*prog->bpf_func)(skb, prog->insnsi);
1053         preempt_enable();
1054         skb->sk = NULL;
1055 
1056         /* Moving return codes from UAPI namespace into internal namespace */
1057         return rc == SK_PASS ?
1058                 (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
1059                 __SK_DROP;
1060 }
1061 
1062 static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
1063 {
1064         struct sock *sk = psock->sock;
1065         int copied = 0, num_sg;
1066         struct sk_msg_buff *r;
1067 
1068         r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
1069         if (unlikely(!r))
1070                 return -EAGAIN;
1071 
1072         if (!sk_rmem_schedule(sk, skb, skb->len)) {
1073                 kfree(r);
1074                 return -EAGAIN;
1075         }
1076 
1077         sg_init_table(r->sg_data, MAX_SKB_FRAGS);
1078         num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
1079         if (unlikely(num_sg < 0)) {
1080                 kfree(r);
1081                 return num_sg;
1082         }
1083         sk_mem_charge(sk, skb->len);
1084         copied = skb->len;
1085         r->sg_start = 0;
1086         r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
1087         r->skb = skb;
1088         list_add_tail(&r->list, &psock->ingress);
1089         sk->sk_data_ready(sk);
1090         return copied;
1091 }
1092 
1093 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
1094 {
1095         struct smap_psock *peer;
1096         struct sock *sk;
1097         __u32 in;
1098         int rc;
1099 
1100         rc = smap_verdict_func(psock, skb);
1101         switch (rc) {
1102         case __SK_REDIRECT:
1103                 sk = do_sk_redirect_map(skb);
1104                 if (!sk) {
1105                         kfree_skb(skb);
1106                         break;
1107                 }
1108 
1109                 peer = smap_psock_sk(sk);
1110                 in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1111 
1112                 if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
1113                              !test_bit(SMAP_TX_RUNNING, &peer->state))) {
1114                         kfree_skb(skb);
1115                         break;
1116                 }
1117 
1118                 if (!in && sock_writeable(sk)) {
1119                         skb_set_owner_w(skb, sk);
1120                         skb_queue_tail(&peer->rxqueue, skb);
1121                         schedule_work(&peer->tx_work);
1122                         break;
1123                 } else if (in &&
1124                            atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
1125                         skb_queue_tail(&peer->rxqueue, skb);
1126                         schedule_work(&peer->tx_work);
1127                         break;
1128                 }
1129         /* Fall through and free skb otherwise */
1130         case __SK_DROP:
1131         default:
1132                 kfree_skb(skb);
1133         }
1134 }
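
/* An illustrative BPF-side sketch (not part of this file) of a parser and
 * verdict program pair of the kind run by smap_parse_func_strparser() below
 * and smap_verdict_func()/smap_do_verdict() above. The section names and the
 * map name sock_map are assumptions; how the programs are named, loaded and
 * attached depends on the loader being used.
 *
 *	SEC("sk_skb/parse")
 *	int parse_prog(struct __sk_buff *skb)
 *	{
 *		// treat everything received so far as a single record
 *		return skb->len;
 *	}
 *
 *	SEC("sk_skb/verdict")
 *	int verdict_prog(struct __sk_buff *skb)
 *	{
 *		// redirect into the receive queue of the socket in slot 0
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, BPF_F_INGRESS);
 *	}
 */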
1135 
1136 static void smap_report_sk_error(struct smap_psock *psock, int err)
1137 {
1138         struct sock *sk = psock->sock;
1139 
1140         sk->sk_err = err;
1141         sk->sk_error_report(sk);
1142 }
1143 
1144 static void smap_read_sock_strparser(struct strparser *strp,
1145                                      struct sk_buff *skb)
1146 {
1147         struct smap_psock *psock;
1148 
1149         rcu_read_lock();
1150         psock = container_of(strp, struct smap_psock, strp);
1151         smap_do_verdict(psock, skb);
1152         rcu_read_unlock();
1153 }
1154 
1155 /* Called with lock held on socket */
1156 static void smap_data_ready(struct sock *sk)
1157 {
1158         struct smap_psock *psock;
1159 
1160         rcu_read_lock();
1161         psock = smap_psock_sk(sk);
1162         if (likely(psock)) {
1163                 write_lock_bh(&sk->sk_callback_lock);
1164                 strp_data_ready(&psock->strp);
1165                 write_unlock_bh(&sk->sk_callback_lock);
1166         }
1167         rcu_read_unlock();
1168 }
1169 
1170 static void smap_tx_work(struct work_struct *w)
1171 {
1172         struct smap_psock *psock;
1173         struct sk_buff *skb;
1174         int rem, off, n;
1175 
1176         psock = container_of(w, struct smap_psock, tx_work);
1177 
1178         /* lock sock to avoid losing sk_socket at some point during loop */
1179         lock_sock(psock->sock);
1180         if (psock->save_skb) {
1181                 skb = psock->save_skb;
1182                 rem = psock->save_rem;
1183                 off = psock->save_off;
1184                 psock->save_skb = NULL;
1185                 goto start;
1186         }
1187 
1188         while ((skb = skb_dequeue(&psock->rxqueue))) {
1189                 __u32 flags;
1190 
1191                 rem = skb->len;
1192                 off = 0;
1193 start:
1194                 flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1195                 do {
1196                         if (likely(psock->sock->sk_socket)) {
1197                                 if (flags)
1198                                         n = smap_do_ingress(psock, skb);
1199                                 else
1200                                         n = skb_send_sock_locked(psock->sock,
1201                                                                  skb, off, rem);
1202                         } else {
1203                                 n = -EINVAL;
1204                         }
1205 
1206                         if (n <= 0) {
1207                                 if (n == -EAGAIN) {
1208                                         /* Retry when space is available */
1209                                         psock->save_skb = skb;
1210                                         psock->save_rem = rem;
1211                                         psock->save_off = off;
1212                                         goto out;
1213                                 }
1214                                 /* Hard errors break pipe and stop xmit */
1215                                 smap_report_sk_error(psock, n ? -n : EPIPE);
1216                                 clear_bit(SMAP_TX_RUNNING, &psock->state);
1217                                 kfree_skb(skb);
1218                                 goto out;
1219                         }
1220                         rem -= n;
1221                         off += n;
1222                 } while (rem);
1223 
1224                 if (!flags)
1225                         kfree_skb(skb);
1226         }
1227 out:
1228         release_sock(psock->sock);
1229 }
1230 
1231 static void smap_write_space(struct sock *sk)
1232 {
1233         struct smap_psock *psock;
1234 
1235         rcu_read_lock();
1236         psock = smap_psock_sk(sk);
1237         if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
1238                 schedule_work(&psock->tx_work);
1239         rcu_read_unlock();
1240 }
1241 
1242 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
1243 {
1244         if (!psock->strp_enabled)
1245                 return;
1246         sk->sk_data_ready = psock->save_data_ready;
1247         sk->sk_write_space = psock->save_write_space;
1248         psock->save_data_ready = NULL;
1249         psock->save_write_space = NULL;
1250         strp_stop(&psock->strp);
1251         psock->strp_enabled = false;
1252 }
1253 
1254 static void smap_destroy_psock(struct rcu_head *rcu)
1255 {
1256         struct smap_psock *psock = container_of(rcu,
1257                                                   struct smap_psock, rcu);
1258 
1259         /* Now that a grace period has passed there is no longer
1260          * any reference to this sock in the sockmap, so we can
1261          * destroy the psock, strparser, and bpf programs. But,
1262          * because we use workqueue sync operations, we cannot
1263          * do it in rcu context.
1264          */
1265         schedule_work(&psock->gc_work);
1266 }
1267 
1268 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
1269 {
1270         if (refcount_dec_and_test(&psock->refcnt)) {
1271                 tcp_cleanup_ulp(sock);
1272                 smap_stop_sock(psock, sock);
1273                 clear_bit(SMAP_TX_RUNNING, &psock->state);
1274                 rcu_assign_sk_user_data(sock, NULL);
1275                 call_rcu_sched(&psock->rcu, smap_destroy_psock);
1276         }
1277 }
1278 
1279 static int smap_parse_func_strparser(struct strparser *strp,
1280                                        struct sk_buff *skb)
1281 {
1282         struct smap_psock *psock;
1283         struct bpf_prog *prog;
1284         int rc;
1285 
1286         rcu_read_lock();
1287         psock = container_of(strp, struct smap_psock, strp);
1288         prog = READ_ONCE(psock->bpf_parse);
1289 
1290         if (unlikely(!prog)) {
1291                 rcu_read_unlock();
1292                 return skb->len;
1293         }
1294 
1295         /* Attach the socket for the bpf program to use if needed. We can do
1296          * this because strparser clones the skb before handing it to an upper
1297          * layer, meaning skb_orphan has been called. We NULL sk on the
1298          * way out to ensure we don't trigger a BUG_ON in skb/sk operations
1299          * later, and because we are not charging the memory of this skb to
1300          * any socket yet.
1301          */
1302         skb->sk = psock->sock;
1303         bpf_compute_data_pointers(skb);
1304         rc = (*prog->bpf_func)(skb, prog->insnsi);
1305         skb->sk = NULL;
1306         rcu_read_unlock();
1307         return rc;
1308 }
1309 
1310 static int smap_read_sock_done(struct strparser *strp, int err)
1311 {
1312         return err;
1313 }
1314 
1315 static int smap_init_sock(struct smap_psock *psock,
1316                           struct sock *sk)
1317 {
1318         static const struct strp_callbacks cb = {
1319                 .rcv_msg = smap_read_sock_strparser,
1320                 .parse_msg = smap_parse_func_strparser,
1321                 .read_sock_done = smap_read_sock_done,
1322         };
1323 
1324         return strp_init(&psock->strp, sk, &cb);
1325 }
1326 
1327 static void smap_init_progs(struct smap_psock *psock,
1328                             struct bpf_stab *stab,
1329                             struct bpf_prog *verdict,
1330                             struct bpf_prog *parse)
1331 {
1332         struct bpf_prog *orig_parse, *orig_verdict;
1333 
1334         orig_parse = xchg(&psock->bpf_parse, parse);
1335         orig_verdict = xchg(&psock->bpf_verdict, verdict);
1336 
1337         if (orig_verdict)
1338                 bpf_prog_put(orig_verdict);
1339         if (orig_parse)
1340                 bpf_prog_put(orig_parse);
1341 }
1342 
1343 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
1344 {
1345         if (sk->sk_data_ready == smap_data_ready)
1346                 return;
1347         psock->save_data_ready = sk->sk_data_ready;
1348         psock->save_write_space = sk->sk_write_space;
1349         sk->sk_data_ready = smap_data_ready;
1350         sk->sk_write_space = smap_write_space;
1351         psock->strp_enabled = true;
1352 }
1353 
1354 static void sock_map_remove_complete(struct bpf_stab *stab)
1355 {
1356         bpf_map_area_free(stab->sock_map);
1357         kfree(stab);
1358 }
1359 
1360 static void smap_gc_work(struct work_struct *w)
1361 {
1362         struct smap_psock_map_entry *e, *tmp;
1363         struct sk_msg_buff *md, *mtmp;
1364         struct smap_psock *psock;
1365 
1366         psock = container_of(w, struct smap_psock, gc_work);
1367 
1368         /* no callback lock needed because we already detached sockmap ops */
1369         if (psock->strp_enabled)
1370                 strp_done(&psock->strp);
1371 
1372         cancel_work_sync(&psock->tx_work);
1373         __skb_queue_purge(&psock->rxqueue);
1374 
1375         /* At this point all strparser and xmit work must be complete */
1376         if (psock->bpf_parse)
1377                 bpf_prog_put(psock->bpf_parse);
1378         if (psock->bpf_verdict)
1379                 bpf_prog_put(psock->bpf_verdict);
1380         if (psock->bpf_tx_msg)
1381                 bpf_prog_put(psock->bpf_tx_msg);
1382 
1383         if (psock->cork) {
1384                 free_start_sg(psock->sock, psock->cork);
1385                 kfree(psock->cork);
1386         }
1387 
1388         list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
1389                 list_del(&md->list);
1390                 free_start_sg(psock->sock, md);
1391                 kfree(md);
1392         }
1393 
1394         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1395                 list_del(&e->list);
1396                 kfree(e);
1397         }
1398 
1399         if (psock->sk_redir)
1400                 sock_put(psock->sk_redir);
1401 
1402         sock_put(psock->sock);
1403         kfree(psock);
1404 }
1405 
1406 static struct smap_psock *smap_init_psock(struct sock *sock,
1407                                           struct bpf_stab *stab)
1408 {
1409         struct smap_psock *psock;
1410 
1411         psock = kzalloc_node(sizeof(struct smap_psock),
1412                              GFP_ATOMIC | __GFP_NOWARN,
1413                              stab->map.numa_node);
1414         if (!psock)
1415                 return ERR_PTR(-ENOMEM);
1416 
1417         psock->eval =  __SK_NONE;
1418         psock->sock = sock;
1419         skb_queue_head_init(&psock->rxqueue);
1420         INIT_WORK(&psock->tx_work, smap_tx_work);
1421         INIT_WORK(&psock->gc_work, smap_gc_work);
1422         INIT_LIST_HEAD(&psock->maps);
1423         INIT_LIST_HEAD(&psock->ingress);
1424         refcount_set(&psock->refcnt, 1);
1425 
1426         rcu_assign_sk_user_data(sock, psock);
1427         sock_hold(sock);
1428         return psock;
1429 }
1430 
1431 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1432 {
1433         struct bpf_stab *stab;
1434         u64 cost;
1435         int err;
1436 
1437         if (!capable(CAP_NET_ADMIN))
1438                 return ERR_PTR(-EPERM);
1439 
1440         /* check sanity of attributes */
1441         if (attr->max_entries == 0 || attr->key_size != 4 ||
1442             attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1443                 return ERR_PTR(-EINVAL);
1444 
1445         if (attr->value_size > KMALLOC_MAX_SIZE)
1446                 return ERR_PTR(-E2BIG);
1447 
1448         err = bpf_tcp_ulp_register();
1449         if (err && err != -EEXIST)
1450                 return ERR_PTR(err);
1451 
1452         stab = kzalloc(sizeof(*stab), GFP_USER);
1453         if (!stab)
1454                 return ERR_PTR(-ENOMEM);
1455 
1456         bpf_map_init_from_attr(&stab->map, attr);
1457 
1458         /* make sure page count doesn't overflow */
1459         cost = (u64) stab->map.max_entries * sizeof(struct sock *);
1460         err = -EINVAL;
1461         if (cost >= U32_MAX - PAGE_SIZE)
1462                 goto free_stab;
1463 
1464         stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1465 
1466         /* if map size is larger than memlock limit, reject it early */
1467         err = bpf_map_precharge_memlock(stab->map.pages);
1468         if (err)
1469                 goto free_stab;
1470 
1471         err = -ENOMEM;
1472         stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
1473                                             sizeof(struct sock *),
1474                                             stab->map.numa_node);
1475         if (!stab->sock_map)
1476                 goto free_stab;
1477 
1478         return &stab->map;
1479 free_stab:
1480         kfree(stab);
1481         return ERR_PTR(err);
1482 }
1483 
1484 static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
1485 {
1486         struct smap_psock_map_entry *e, *tmp;
1487 
1488         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1489                 if (e->entry == entry) {
1490                         list_del(&e->list);
1491                         break;
1492                 }
1493         }
1494 }
1495 
1496 static void sock_map_free(struct bpf_map *map)
1497 {
1498         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1499         int i;
1500 
1501         synchronize_rcu();
1502 
1503         /* At this point no update, lookup or delete operations can happen.
1504          * However, be aware we can still get socket state event updates
1505          * and data ready callbacks that reference the psock from sk_user_data.
1506          * Also, psock worker threads are still in-flight. So smap_release_sock
1507          * will only free the psock after cancel_work_sync on the worker threads
1508          * and a grace period expires, to ensure the psock is really safe to remove.
1509          */
1510         rcu_read_lock();
1511         for (i = 0; i < stab->map.max_entries; i++) {
1512                 struct smap_psock *psock;
1513                 struct sock *sock;
1514 
1515                 sock = xchg(&stab->sock_map[i], NULL);
1516                 if (!sock)
1517                         continue;
1518 
1519                 write_lock_bh(&sock->sk_callback_lock);
1520                 psock = smap_psock_sk(sock);
1521                 /* This check handles a racing sock event that can grab the
1522                  * sk_callback_lock before this code but after the xchg happens,
1523                  * causing the refcnt to hit zero and the sock user data (psock)
1524                  * to be NULL and queued for garbage collection.
1525                  */
1526                 if (likely(psock)) {
1527                         smap_list_remove(psock, &stab->sock_map[i]);
1528                         smap_release_sock(psock, sock);
1529                 }
1530                 write_unlock_bh(&sock->sk_callback_lock);
1531         }
1532         rcu_read_unlock();
1533 
1534         sock_map_remove_complete(stab);
1535 }
1536 
1537 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
1538 {
1539         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1540         u32 i = key ? *(u32 *)key : U32_MAX;
1541         u32 *next = (u32 *)next_key;
1542 
1543         if (i >= stab->map.max_entries) {
1544                 *next = 0;
1545                 return 0;
1546         }
1547 
1548         if (i == stab->map.max_entries - 1)
1549                 return -ENOENT;
1550 
1551         *next = i + 1;
1552         return 0;
1553 }
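
/* Illustrative sketch, not part of sockmap.c: walking every slot index from
 * userspace with libbpf's bpf_map_get_next_key() (tools/lib/bpf). Per
 * sock_map_get_next_key() above, an out-of-range start key wraps to slot 0 and
 * the final slot returns -ENOENT, which ends the loop. The header path and the
 * helper name dump_sockmap_keys() are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <bpf/bpf.h>            /* tools/lib/bpf, assumed installed */

static void dump_sockmap_keys(int map_fd)
{
        uint32_t key = UINT32_MAX;      /* >= max_entries, so iteration starts at 0 */
        uint32_t next;

        while (bpf_map_get_next_key(map_fd, &key, &next) == 0) {
                printf("slot %u\n", next);
                key = next;
        }
}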
1554 
1555 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
1556 {
1557         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1558 
1559         if (key >= map->max_entries)
1560                 return NULL;
1561 
1562         return READ_ONCE(stab->sock_map[key]);
1563 }
1564 
1565 static int sock_map_delete_elem(struct bpf_map *map, void *key)
1566 {
1567         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1568         struct smap_psock *psock;
1569         int k = *(u32 *)key;
1570         struct sock *sock;
1571 
1572         if (k >= map->max_entries)
1573                 return -EINVAL;
1574 
1575         sock = xchg(&stab->sock_map[k], NULL);
1576         if (!sock)
1577                 return -EINVAL;
1578 
1579         write_lock_bh(&sock->sk_callback_lock);
1580         psock = smap_psock_sk(sock);
1581         if (!psock)
1582                 goto out;
1583 
1584         if (psock->bpf_parse)
1585                 smap_stop_sock(psock, sock);
1586         smap_list_remove(psock, &stab->sock_map[k]);
1587         smap_release_sock(psock, sock);
1588 out:
1589         write_unlock_bh(&sock->sk_callback_lock);
1590         return 0;
1591 }
1592 
1593 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
1594  * done inside rcu critical sections. This ensures on updates that the psock
1595  * will not be released via smap_release_sock() until concurrent updates/deletes
1596  * complete. All operations operate on sock_map using cmpxchg and xchg
1597  * operations to ensure we do not get stale references. Any reads into the
1598  * map must be done with READ_ONCE() because of this.
1599  *
1600  * A psock is destroyed via call_rcu and after any worker threads are cancelled
1601  * and synced, so we are certain all references from the update/lookup/delete
1602  * operations as well as references in the data path are no longer in use.
1603  *
1604  * Psocks may exist in multiple maps, but only a single set of parse/verdict
1605  * programs may be inherited from the maps they belong to. A reference count
1606  * is kept with the total number of references to the psock from all maps. The
1607  * psock will not be released until this reaches zero. The psock and sock
1608  * user data use the sk_callback_lock to protect critical data structures
1609  * from concurrent access. This prevents two updates from modifying the
1610  * user data in the sock at the same time; the lock is required anyway for
1611  * modifying callbacks, we simply increase its scope slightly.
1612  *
1613  * Rules to follow,
1614  *  - psock must always be read inside RCU critical section
1615  *  - sk_user_data must only be modified inside sk_callback_lock and read
1616  *    inside RCU critical section.
1617  *  - psock->maps list must only be read & modified inside sk_callback_lock
1618  *  - sock_map must use READ_ONCE and (cmp)xchg operations
1619  *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
1620  */
1621 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1622                                     struct bpf_map *map,
1623                                     void *key, u64 flags)
1624 {
1625         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1626         struct smap_psock_map_entry *e = NULL;
1627         struct bpf_prog *verdict, *parse, *tx_msg;
1628         struct sock *osock, *sock;
1629         struct smap_psock *psock;
1630         u32 i = *(u32 *)key;
1631         bool new = false;
1632         int err;
1633 
1634         if (unlikely(flags > BPF_EXIST))
1635                 return -EINVAL;
1636 
1637         if (unlikely(i >= stab->map.max_entries))
1638                 return -E2BIG;
1639 
1640         sock = READ_ONCE(stab->sock_map[i]);
1641         if (flags == BPF_EXIST && !sock)
1642                 return -ENOENT;
1643         else if (flags == BPF_NOEXIST && sock)
1644                 return -EEXIST;
1645 
1646         sock = skops->sk;
1647 
1648         /* 1. If the sock map has BPF programs, those will be inherited by the
1649          * sock being added. If the sock is already attached to BPF programs,
1650          * this results in an error.
1651          */
1652         verdict = READ_ONCE(stab->bpf_verdict);
1653         parse = READ_ONCE(stab->bpf_parse);
1654         tx_msg = READ_ONCE(stab->bpf_tx_msg);
1655 
1656         if (parse && verdict) {
1657                 /* bpf prog refcnt may be zero if a concurrent attach operation
1658                  * removes the program after the above READ_ONCE() but before
1659                  * we increment the refcnt. If this is the case abort with an
1660                  * error.
1661                  */
1662                 verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
1663                 if (IS_ERR(verdict))
1664                         return PTR_ERR(verdict);
1665 
1666                 parse = bpf_prog_inc_not_zero(stab->bpf_parse);
1667                 if (IS_ERR(parse)) {
1668                         bpf_prog_put(verdict);
1669                         return PTR_ERR(parse);
1670                 }
1671         }
1672 
1673         if (tx_msg) {
1674                 tx_msg = bpf_prog_inc_not_zero(stab->bpf_tx_msg);
1675                 if (IS_ERR(tx_msg)) {
1676                         if (verdict)
1677                                 bpf_prog_put(verdict);
1678                         if (parse)
1679                                 bpf_prog_put(parse);
1680                         return PTR_ERR(tx_msg);
1681                 }
1682         }
1683 
1684         write_lock_bh(&sock->sk_callback_lock);
1685         psock = smap_psock_sk(sock);
1686 
1687         /* 2. Do not allow inheriting programs if a psock exists and has
1688          * already inherited programs. This would create confusion about
1689          * which parser/verdict program is running. If no psock exists,
1690          * create one. This is done inside sk_callback_lock to ensure a
1691          * concurrent create doesn't update the user data.
1692          */
1693         if (psock) {
1694                 if (READ_ONCE(psock->bpf_parse) && parse) {
1695                         err = -EBUSY;
1696                         goto out_progs;
1697                 }
1698                 if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
1699                         err = -EBUSY;
1700                         goto out_progs;
1701                 }
1702                 if (!refcount_inc_not_zero(&psock->refcnt)) {
1703                         err = -EAGAIN;
1704                         goto out_progs;
1705                 }
1706         } else {
1707                 psock = smap_init_psock(sock, stab);
1708                 if (IS_ERR(psock)) {
1709                         err = PTR_ERR(psock);
1710                         goto out_progs;
1711                 }
1712 
1713                 set_bit(SMAP_TX_RUNNING, &psock->state);
1714                 new = true;
1715         }
1716 
1717         e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
1718         if (!e) {
1719                 err = -ENOMEM;
1720                 goto out_progs;
1721         }
1722         e->entry = &stab->sock_map[i];
1723 
1724         /* 3. At this point we have a reference to a valid psock that is
1725          * running. Attach any BPF programs needed.
1726          */
1727         if (tx_msg)
1728                 bpf_tcp_msg_add(psock, sock, tx_msg);
1729         if (new) {
1730                 err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
1731                 if (err)
1732                         goto out_free;
1733         }
1734 
1735         if (parse && verdict && !psock->strp_enabled) {
1736                 err = smap_init_sock(psock, sock);
1737                 if (err)
1738                         goto out_free;
1739                 smap_init_progs(psock, stab, verdict, parse);
1740                 smap_start_sock(psock, sock);
1741         }
1742 
1743         /* 4. Place the psock in the sockmap for use and stop any programs on
1744          * the old sock, assuming it's not the same sock we are replacing
1745          * it with. Because we can only have a single set of programs, if
1746          * the old sock has a strp we can stop it.
1747          */
1748         list_add_tail(&e->list, &psock->maps);
1749         write_unlock_bh(&sock->sk_callback_lock);
1750 
1751         osock = xchg(&stab->sock_map[i], sock);
1752         if (osock) {
1753                 struct smap_psock *opsock = smap_psock_sk(osock);
1754 
1755                 write_lock_bh(&osock->sk_callback_lock);
1756                 smap_list_remove(opsock, &stab->sock_map[i]);
1757                 smap_release_sock(opsock, osock);
1758                 write_unlock_bh(&osock->sk_callback_lock);
1759         }
1760         return 0;
1761 out_free:
1762         smap_release_sock(psock, sock);
1763 out_progs:
1764         if (verdict)
1765                 bpf_prog_put(verdict);
1766         if (parse)
1767                 bpf_prog_put(parse);
1768         if (tx_msg)
1769                 bpf_prog_put(tx_msg);
1770         write_unlock_bh(&sock->sk_callback_lock);
1771         kfree(e);
1772         return err;
1773 }
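
/* Illustrative sketch, not part of sockmap.c: a hypothetical helper spelling
 * out the read-side rules from the locking notes above sock_map_ctx_update_elem().
 * Map slots are read under RCU with READ_ONCE(), the psock is read from
 * sk_user_data inside the same RCU section, and it is only pinned with
 * refcount_inc_not_zero() because the psock is freed via call_rcu() once its
 * refcnt drops to zero. smap_lookup_hold() does not exist in this file.
 */
static struct smap_psock *smap_lookup_hold(struct bpf_stab *stab, u32 key)
{
        struct smap_psock *psock = NULL;
        struct sock *sk;

        if (key >= stab->map.max_entries)
                return NULL;

        rcu_read_lock();
        sk = READ_ONCE(stab->sock_map[key]);    /* rule: READ_ONCE on the map */
        if (sk) {
                psock = smap_psock_sk(sk);      /* rule: psock read under RCU */
                if (psock && !refcount_inc_not_zero(&psock->refcnt))
                        psock = NULL;           /* lost the race with release */
        }
        rcu_read_unlock();
        return psock;                           /* caller now holds a psock ref */
}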
1774 
1775 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
1776 {
1777         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1778         struct bpf_prog *orig;
1779 
1780         if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
1781                 return -EINVAL;
1782 
1783         switch (type) {
1784         case BPF_SK_MSG_VERDICT:
1785                 orig = xchg(&stab->bpf_tx_msg, prog);
1786                 break;
1787         case BPF_SK_SKB_STREAM_PARSER:
1788                 orig = xchg(&stab->bpf_parse, prog);
1789                 break;
1790         case BPF_SK_SKB_STREAM_VERDICT:
1791                 orig = xchg(&stab->bpf_verdict, prog);
1792                 break;
1793         default:
1794                 return -EOPNOTSUPP;
1795         }
1796 
1797         if (orig)
1798                 bpf_prog_put(orig);
1799 
1800         return 0;
1801 }
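
/* Illustrative sketch, not part of sockmap.c: attaching programs to a sockmap
 * from userspace. sock_map_prog() above is reached via BPF_PROG_ATTACH with
 * the sockmap fd as the attach target; here libbpf's bpf_prog_attach()
 * (tools/lib/bpf) is used. The program fds are assumed to come from earlier
 * program loads, and attach_sockmap_progs() is a name invented for this sketch.
 */
#include <bpf/bpf.h>            /* tools/lib/bpf, assumed installed */

static int attach_sockmap_progs(int map_fd, int parser_fd, int verdict_fd,
                                int msg_fd)
{
        int err;

        err = bpf_prog_attach(parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
        if (err)
                return err;

        err = bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
        if (err)
                return err;

        /* optional sendmsg hook, stored as stab->bpf_tx_msg by sock_map_prog() */
        return bpf_prog_attach(msg_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
}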
1802 
1803 static void *sock_map_lookup(struct bpf_map *map, void *key)
1804 {
1805         return NULL;
1806 }
1807 
1808 static int sock_map_update_elem(struct bpf_map *map,
1809                                 void *key, void *value, u64 flags)
1810 {
1811         struct bpf_sock_ops_kern skops;
1812         u32 fd = *(u32 *)value;
1813         struct socket *socket;
1814         int err;
1815 
1816         socket = sockfd_lookup(fd, &err);
1817         if (!socket)
1818                 return err;
1819 
1820         skops.sk = socket->sk;
1821         if (!skops.sk) {
1822                 fput(socket->file);
1823                 return -EINVAL;
1824         }
1825 
1826         if (skops.sk->sk_type != SOCK_STREAM ||
1827             skops.sk->sk_protocol != IPPROTO_TCP) {
1828                 fput(socket->file);
1829                 return -EOPNOTSUPP;
1830         }
1831 
1832         err = sock_map_ctx_update_elem(&skops, map, key, flags);
1833         fput(socket->file);
1834         return err;
1835 }
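
/* Illustrative sketch, not part of sockmap.c: adding an established TCP socket
 * to a sockmap slot from userspace with libbpf's bpf_map_update_elem()
 * (tools/lib/bpf). The value passed in is the socket fd, which
 * sock_map_update_elem() above resolves via sockfd_lookup(); anything other
 * than a SOCK_STREAM/IPPROTO_TCP socket is rejected with -EOPNOTSUPP.
 * sockmap_add_fd() is a name invented for this sketch.
 */
#include <stdint.h>
#include <bpf/bpf.h>            /* tools/lib/bpf, assumed installed */

static int sockmap_add_fd(int map_fd, uint32_t slot, int sock_fd)
{
        /* BPF_ANY: fill the slot, replacing any socket already stored there */
        return bpf_map_update_elem(map_fd, &slot, &sock_fd, BPF_ANY);
}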
1836 
1837 static void sock_map_release(struct bpf_map *map, struct file *map_file)
1838 {
1839         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1840         struct bpf_prog *orig;
1841 
1842         orig = xchg(&stab->bpf_parse, NULL);
1843         if (orig)
1844                 bpf_prog_put(orig);
1845         orig = xchg(&stab->bpf_verdict, NULL);
1846         if (orig)
1847                 bpf_prog_put(orig);
1848 
1849         orig = xchg(&stab->bpf_tx_msg, NULL);
1850         if (orig)
1851                 bpf_prog_put(orig);
1852 }
1853 
1854 const struct bpf_map_ops sock_map_ops = {
1855         .map_alloc = sock_map_alloc,
1856         .map_free = sock_map_free,
1857         .map_lookup_elem = sock_map_lookup,
1858         .map_get_next_key = sock_map_get_next_key,
1859         .map_update_elem = sock_map_update_elem,
1860         .map_delete_elem = sock_map_delete_elem,
1861         .map_release = sock_map_release,
1862 };
1863 
1864 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
1865            struct bpf_map *, map, void *, key, u64, flags)
1866 {
1867         WARN_ON_ONCE(!rcu_read_lock_held());
1868         return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
1869 }
1870 
1871 const struct bpf_func_proto bpf_sock_map_update_proto = {
1872         .func           = bpf_sock_map_update,
1873         .gpl_only       = false,
1874         .pkt_access     = true,
1875         .ret_type       = RET_INTEGER,
1876         .arg1_type      = ARG_PTR_TO_CTX,
1877         .arg2_type      = ARG_CONST_MAP_PTR,
1878         .arg3_type      = ARG_PTR_TO_MAP_KEY,
1879         .arg4_type      = ARG_ANYTHING,
1880 };
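
/* Illustrative sketch, not part of sockmap.c: a minimal sock_ops BPF program
 * using the bpf_sock_map_update() helper whose proto is defined above
 * (ctx, map, key, flags). The SEC()/struct bpf_map_def conventions mirror the
 * 4.17-era samples and selftests; the map name, slot choice, and program name
 * are assumptions for illustration.
 */
#include <uapi/linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

struct bpf_map_def {
        unsigned int type;
        unsigned int key_size;
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
};

static int (*bpf_sock_map_update)(void *ctx, void *map, void *key,
                                  unsigned long long flags) =
        (void *) BPF_FUNC_sock_map_update;

struct bpf_map_def SEC("maps") sock_map = {
        .type           = BPF_MAP_TYPE_SOCKMAP,
        .key_size       = sizeof(int),
        .value_size     = sizeof(int),
        .max_entries    = 64,
};

SEC("sockops")
int bpf_add_established(struct bpf_sock_ops *skops)
{
        int key = 0;

        /* add the socket to slot 0 once the TCP connection is established */
        if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
            skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
                bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
        return 0;
}

char _license[] SEC("license") = "GPL";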
1881 
