TOMOYO Linux Cross Reference
Linux/kernel/bpf/sockmap.c

  1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  2  *
  3  * This program is free software; you can redistribute it and/or
  4  * modify it under the terms of version 2 of the GNU General Public
  5  * License as published by the Free Software Foundation.
  6  *
  7  * This program is distributed in the hope that it will be useful, but
  8  * WITHOUT ANY WARRANTY; without even the implied warranty of
  9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 10  * General Public License for more details.
 11  */
 12 
 13 /* A BPF sock_map is used to store sock objects. This is primarily used
 14  * for doing socket redirect with BPF helper routines.
 15  *
 16  * A sock map may have BPF programs attached to it, currently a program
 17  * used to parse packets and a program to provide a verdict and redirect
 18  * decision on the packet are supported. Any programs attached to a sock
 19  * map are inherited by sock objects when they are added to the map. If
 20  * no BPF programs are attached the sock object may only be used for sock
 21  * redirect.
 22  *
 23  * A sock object may be in multiple maps, but can only inherit a single
 24  * parse or verdict program. If adding a sock object to a map would result
 25  * in having multiple parsing programs the update will return an EBUSY error.
 26  *
 27  * For reference this program is similar to devmap used in the XDP context;
 28  * reviewing these together may be useful. For an example please review
 29  * ./samples/bpf/sockmap/.
 30  */
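
For orientation, the sketch below shows how user space might drive this map. It is not part of this file; it is an illustrative example that creates a BPF_MAP_TYPE_SOCKMAP and inserts a connected TCP socket fd via the raw bpf(2) syscall, matching the key_size/value_size checks in sock_map_alloc() and the fd-to-sock lookup in sock_map_update_elem() below. Error handling is omitted and the function names are local to the example.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Create a sockmap: 4-byte key (slot index), 4-byte value (socket fd),
 * exactly what sock_map_alloc() requires.
 */
static int sockmap_create(uint32_t max_entries)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_SOCKMAP;
        attr.key_size = sizeof(uint32_t);
        attr.value_size = sizeof(uint32_t);
        attr.max_entries = max_entries;
        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

/* Store an established TCP socket in slot 'key'; the kernel resolves the
 * fd to a struct sock in sock_map_update_elem().
 */
static int sockmap_add(int map_fd, uint32_t key, int sock_fd)
{
        union bpf_attr attr;
        uint32_t value = sock_fd;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        attr.key = (uint64_t)(unsigned long)&key;
        attr.value = (uint64_t)(unsigned long)&value;
        attr.flags = BPF_ANY;
        return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
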
 31 #include <linux/bpf.h>
 32 #include <net/sock.h>
 33 #include <linux/filter.h>
 34 #include <linux/errno.h>
 35 #include <linux/file.h>
 36 #include <linux/kernel.h>
 37 #include <linux/net.h>
 38 #include <linux/skbuff.h>
 39 #include <linux/workqueue.h>
 40 #include <linux/list.h>
 41 #include <net/strparser.h>
 42 #include <net/tcp.h>
 43 
 44 struct bpf_stab {
 45         struct bpf_map map;
 46         struct sock **sock_map;
 47         struct bpf_prog *bpf_parse;
 48         struct bpf_prog *bpf_verdict;
 49 };
 50 
 51 enum smap_psock_state {
 52         SMAP_TX_RUNNING,
 53 };
 54 
 55 struct smap_psock_map_entry {
 56         struct list_head list;
 57         struct sock **entry;
 58 };
 59 
 60 struct smap_psock {
 61         struct rcu_head rcu;
 62         /* refcnt is used inside sk_callback_lock */
 63         u32 refcnt;
 64 
 65         /* datapath variables */
 66         struct sk_buff_head rxqueue;
 67         bool strp_enabled;
 68 
 69         /* datapath error path cache across tx work invocations */
 70         int save_rem;
 71         int save_off;
 72         struct sk_buff *save_skb;
 73 
 74         struct strparser strp;
 75         struct bpf_prog *bpf_parse;
 76         struct bpf_prog *bpf_verdict;
 77         struct list_head maps;
 78 
 79         /* Back reference used when sock callbacks trigger sockmap operations */
 80         struct sock *sock;
 81         unsigned long state;
 82 
 83         struct work_struct tx_work;
 84         struct work_struct gc_work;
 85 
 86         void (*save_data_ready)(struct sock *sk);
 87         void (*save_write_space)(struct sock *sk);
 88         void (*save_state_change)(struct sock *sk);
 89 };
 90 
 91 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 92 {
 93         return rcu_dereference_sk_user_data(sk);
 94 }
 95 
 96 /* compute the linear packet data range [data, data_end) for skb when
 97  * sk_skb type programs are in use.
 98  */
 99 static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
100 {
101         TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
102 }
103 
104 enum __sk_action {
105         __SK_DROP = 0,
106         __SK_PASS,
107         __SK_REDIRECT,
108 };
109 
110 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
111 {
112         struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
113         int rc;
114 
115         if (unlikely(!prog))
116                 return __SK_DROP;
117 
118         skb_orphan(skb);
119         /* We need to ensure that BPF metadata for maps is also cleared
120          * when we orphan the skb so that we don't have the possibility
121          * to reference a stale map.
122          */
123         TCP_SKB_CB(skb)->bpf.map = NULL;
124         skb->sk = psock->sock;
125         bpf_compute_data_end_sk_skb(skb);
126         preempt_disable();
127         rc = (*prog->bpf_func)(skb, prog->insnsi);
128         preempt_enable();
129         skb->sk = NULL;
130 
131         /* Moving return codes from UAPI namespace into internal namespace */
132         return rc == SK_PASS ?
133                 (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
134                 __SK_DROP;
135 }
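
To make the return-code translation above concrete, here is an illustrative sk_skb verdict program. It assumes the SEC()/bpf_map_def conventions from the samples/selftests bpf_helpers.h of this era and a bpf_sk_redirect_map() helper that takes the skb as its first argument, as the tcp_skb_cb handling above implies; treat it as a sketch rather than a reference implementation (see ./samples/bpf/sockmap/ for the real one).

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"        /* SEC(), struct bpf_map_def, helper stubs (assumed) */

struct bpf_map_def SEC("maps") sock_map = {
        .type           = BPF_MAP_TYPE_SOCKMAP,
        .key_size       = sizeof(int),
        .value_size     = sizeof(int),
        .max_entries    = 20,
};

SEC("sk_skb_verdict")
int prog_verdict(struct __sk_buff *skb)
{
        /* Redirect every parsed record to the socket in slot 0. The helper
         * records the map/key in tcp_skb_cb and returns SK_PASS on success,
         * which smap_verdict_func() above turns into __SK_REDIRECT because
         * bpf.map is now non-NULL; SK_DROP (or no program) means drop.
         */
        return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
}

char _license[] SEC("license") = "GPL";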
136 
137 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
138 {
139         struct sock *sk;
140         int rc;
141 
142         rc = smap_verdict_func(psock, skb);
143         switch (rc) {
144         case __SK_REDIRECT:
145                 sk = do_sk_redirect_map(skb);
146                 if (likely(sk)) {
147                         struct smap_psock *peer = smap_psock_sk(sk);
148 
149                         if (likely(peer &&
150                                    test_bit(SMAP_TX_RUNNING, &peer->state) &&
151                                    !sock_flag(sk, SOCK_DEAD) &&
152                                    sock_writeable(sk))) {
153                                 skb_set_owner_w(skb, sk);
154                                 skb_queue_tail(&peer->rxqueue, skb);
155                                 schedule_work(&peer->tx_work);
156                                 break;
157                         }
158                 }
159         /* Fall through and free skb otherwise */
160         case __SK_DROP:
161         default:
162                 kfree_skb(skb);
163         }
164 }
165 
166 static void smap_report_sk_error(struct smap_psock *psock, int err)
167 {
168         struct sock *sk = psock->sock;
169 
170         sk->sk_err = err;
171         sk->sk_error_report(sk);
172 }
173 
174 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
175 
176 /* Called with lock_sock(sk) held */
177 static void smap_state_change(struct sock *sk)
178 {
179         struct smap_psock_map_entry *e, *tmp;
180         struct smap_psock *psock;
181         struct socket_wq *wq;
182         struct sock *osk;
183 
184         rcu_read_lock();
185 
186         /* Allowing transitions into the established and syn_recv states allows
187          * sockets to be bound to a smap object early, before the connection
188          * is established.
189          */
190         switch (sk->sk_state) {
191         case TCP_SYN_SENT:
192         case TCP_SYN_RECV:
193         case TCP_ESTABLISHED:
194                 break;
195         case TCP_CLOSE_WAIT:
196         case TCP_CLOSING:
197         case TCP_LAST_ACK:
198         case TCP_FIN_WAIT1:
199         case TCP_FIN_WAIT2:
200         case TCP_LISTEN:
201                 break;
202         case TCP_CLOSE:
203                 /* Only release if the map entry is in fact the sock in
204                  * question. There is a case where the operator deletes
205                  * the sock from the map, but the TCP sock is closed before
206                  * the psock is detached. Use cmpxchg to verify correct
207                  * sock is removed.
208                  */
209                 psock = smap_psock_sk(sk);
210                 if (unlikely(!psock))
211                         break;
212                 write_lock_bh(&sk->sk_callback_lock);
213                 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
214                         osk = cmpxchg(e->entry, sk, NULL);
215                         if (osk == sk) {
216                                 list_del(&e->list);
217                                 smap_release_sock(psock, sk);
218                         }
219                 }
220                 write_unlock_bh(&sk->sk_callback_lock);
221                 break;
222         default:
223                 psock = smap_psock_sk(sk);
224                 if (unlikely(!psock))
225                         break;
226                 smap_report_sk_error(psock, EPIPE);
227                 break;
228         }
229 
230         wq = rcu_dereference(sk->sk_wq);
231         if (skwq_has_sleeper(wq))
232                 wake_up_interruptible_all(&wq->wait);
233         rcu_read_unlock();
234 }
235 
236 static void smap_read_sock_strparser(struct strparser *strp,
237                                      struct sk_buff *skb)
238 {
239         struct smap_psock *psock;
240 
241         rcu_read_lock();
242         psock = container_of(strp, struct smap_psock, strp);
243         smap_do_verdict(psock, skb);
244         rcu_read_unlock();
245 }
246 
247 /* Called with lock held on socket */
248 static void smap_data_ready(struct sock *sk)
249 {
250         struct smap_psock *psock;
251 
252         rcu_read_lock();
253         psock = smap_psock_sk(sk);
254         if (likely(psock)) {
255                 write_lock_bh(&sk->sk_callback_lock);
256                 strp_data_ready(&psock->strp);
257                 write_unlock_bh(&sk->sk_callback_lock);
258         }
259         rcu_read_unlock();
260 }
261 
262 static void smap_tx_work(struct work_struct *w)
263 {
264         struct smap_psock *psock;
265         struct sk_buff *skb;
266         int rem, off, n;
267 
268         psock = container_of(w, struct smap_psock, tx_work);
269 
270         /* lock sock to avoid losing sk_socket at some point during loop */
271         lock_sock(psock->sock);
272         if (psock->save_skb) {
273                 skb = psock->save_skb;
274                 rem = psock->save_rem;
275                 off = psock->save_off;
276                 psock->save_skb = NULL;
277                 goto start;
278         }
279 
280         while ((skb = skb_dequeue(&psock->rxqueue))) {
281                 rem = skb->len;
282                 off = 0;
283 start:
284                 do {
285                         if (likely(psock->sock->sk_socket))
286                                 n = skb_send_sock_locked(psock->sock,
287                                                          skb, off, rem);
288                         else
289                                 n = -EINVAL;
290                         if (n <= 0) {
291                                 if (n == -EAGAIN) {
292                                         /* Retry when space is available */
293                                         psock->save_skb = skb;
294                                         psock->save_rem = rem;
295                                         psock->save_off = off;
296                                         goto out;
297                                 }
298                                 /* Hard errors break pipe and stop xmit */
299                                 smap_report_sk_error(psock, n ? -n : EPIPE);
300                                 clear_bit(SMAP_TX_RUNNING, &psock->state);
301                                 kfree_skb(skb);
302                                 goto out;
303                         }
304                         rem -= n;
305                         off += n;
306                 } while (rem);
307                 kfree_skb(skb);
308         }
309 out:
310         release_sock(psock->sock);
311 }
312 
313 static void smap_write_space(struct sock *sk)
314 {
315         struct smap_psock *psock;
316 
317         rcu_read_lock();
318         psock = smap_psock_sk(sk);
319         if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
320                 schedule_work(&psock->tx_work);
321         rcu_read_unlock();
322 }
323 
324 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
325 {
326         if (!psock->strp_enabled)
327                 return;
328         sk->sk_data_ready = psock->save_data_ready;
329         sk->sk_write_space = psock->save_write_space;
330         sk->sk_state_change = psock->save_state_change;
331         psock->save_data_ready = NULL;
332         psock->save_write_space = NULL;
333         psock->save_state_change = NULL;
334         strp_stop(&psock->strp);
335         psock->strp_enabled = false;
336 }
337 
338 static void smap_destroy_psock(struct rcu_head *rcu)
339 {
340         struct smap_psock *psock = container_of(rcu,
341                                                   struct smap_psock, rcu);
342 
343         /* Now that a grace period has passed there is no longer
344          * any reference to this sock in the sockmap so we can
345          * destroy the psock, strparser, and bpf programs. But,
346          * because we use workqueue sync operations we can not
347          * do it in rcu context
348          */
349         schedule_work(&psock->gc_work);
350 }
351 
352 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
353 {
354         psock->refcnt--;
355         if (psock->refcnt)
356                 return;
357 
358         smap_stop_sock(psock, sock);
359         clear_bit(SMAP_TX_RUNNING, &psock->state);
360         rcu_assign_sk_user_data(sock, NULL);
361         call_rcu_sched(&psock->rcu, smap_destroy_psock);
362 }
363 
364 static int smap_parse_func_strparser(struct strparser *strp,
365                                        struct sk_buff *skb)
366 {
367         struct smap_psock *psock;
368         struct bpf_prog *prog;
369         int rc;
370 
371         rcu_read_lock();
372         psock = container_of(strp, struct smap_psock, strp);
373         prog = READ_ONCE(psock->bpf_parse);
374 
375         if (unlikely(!prog)) {
376                 rcu_read_unlock();
377                 return skb->len;
378         }
379 
380         /* Attach the socket for the bpf program to use if needed. We can do
381          * this because strparser clones the skb before handing it to an upper
382          * layer, meaning skb_orphan has been called. We NULL sk on the
383          * way out to ensure we don't trigger a BUG_ON in skb/sk operations
384          * later and because we are not charging the memory of this skb to
385          * any socket yet.
386          */
387         skb->sk = psock->sock;
388         bpf_compute_data_end_sk_skb(skb);
389         rc = (*prog->bpf_func)(skb, prog->insnsi);
390         skb->sk = NULL;
391         rcu_read_unlock();
392         return rc;
393 }
394 
395 
396 static int smap_read_sock_done(struct strparser *strp, int err)
397 {
398         return err;
399 }
400 
401 static int smap_init_sock(struct smap_psock *psock,
402                           struct sock *sk)
403 {
404         static const struct strp_callbacks cb = {
405                 .rcv_msg = smap_read_sock_strparser,
406                 .parse_msg = smap_parse_func_strparser,
407                 .read_sock_done = smap_read_sock_done,
408         };
409 
410         return strp_init(&psock->strp, sk, &cb);
411 }
412 
413 static void smap_init_progs(struct smap_psock *psock,
414                             struct bpf_stab *stab,
415                             struct bpf_prog *verdict,
416                             struct bpf_prog *parse)
417 {
418         struct bpf_prog *orig_parse, *orig_verdict;
419 
420         orig_parse = xchg(&psock->bpf_parse, parse);
421         orig_verdict = xchg(&psock->bpf_verdict, verdict);
422 
423         if (orig_verdict)
424                 bpf_prog_put(orig_verdict);
425         if (orig_parse)
426                 bpf_prog_put(orig_parse);
427 }
428 
429 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
430 {
431         if (sk->sk_data_ready == smap_data_ready)
432                 return;
433         psock->save_data_ready = sk->sk_data_ready;
434         psock->save_write_space = sk->sk_write_space;
435         psock->save_state_change = sk->sk_state_change;
436         sk->sk_data_ready = smap_data_ready;
437         sk->sk_write_space = smap_write_space;
438         sk->sk_state_change = smap_state_change;
439         psock->strp_enabled = true;
440 }
441 
442 static void sock_map_remove_complete(struct bpf_stab *stab)
443 {
444         bpf_map_area_free(stab->sock_map);
445         kfree(stab);
446 }
447 
448 static void smap_gc_work(struct work_struct *w)
449 {
450         struct smap_psock_map_entry *e, *tmp;
451         struct smap_psock *psock;
452 
453         psock = container_of(w, struct smap_psock, gc_work);
454 
455         /* no callback lock needed because we already detached sockmap ops */
456         if (psock->strp_enabled)
457                 strp_done(&psock->strp);
458 
459         cancel_work_sync(&psock->tx_work);
460         __skb_queue_purge(&psock->rxqueue);
461 
462         /* At this point all strparser and xmit work must be complete */
463         if (psock->bpf_parse)
464                 bpf_prog_put(psock->bpf_parse);
465         if (psock->bpf_verdict)
466                 bpf_prog_put(psock->bpf_verdict);
467 
468         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
469                 list_del(&e->list);
470                 kfree(e);
471         }
472 
473         sock_put(psock->sock);
474         kfree(psock);
475 }
476 
477 static struct smap_psock *smap_init_psock(struct sock *sock,
478                                           struct bpf_stab *stab)
479 {
480         struct smap_psock *psock;
481 
482         psock = kzalloc_node(sizeof(struct smap_psock),
483                              GFP_ATOMIC | __GFP_NOWARN,
484                              stab->map.numa_node);
485         if (!psock)
486                 return ERR_PTR(-ENOMEM);
487 
488         psock->sock = sock;
489         skb_queue_head_init(&psock->rxqueue);
490         INIT_WORK(&psock->tx_work, smap_tx_work);
491         INIT_WORK(&psock->gc_work, smap_gc_work);
492         INIT_LIST_HEAD(&psock->maps);
493         psock->refcnt = 1;
494 
495         rcu_assign_sk_user_data(sock, psock);
496         sock_hold(sock);
497         return psock;
498 }
499 
500 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
501 {
502         struct bpf_stab *stab;
503         int err = -EINVAL;
504         u64 cost;
505 
506         if (!capable(CAP_NET_ADMIN))
507                 return ERR_PTR(-EPERM);
508 
509         /* check sanity of attributes */
510         if (attr->max_entries == 0 || attr->key_size != 4 ||
511             attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
512                 return ERR_PTR(-EINVAL);
513 
514         if (attr->value_size > KMALLOC_MAX_SIZE)
515                 return ERR_PTR(-E2BIG);
516 
517         stab = kzalloc(sizeof(*stab), GFP_USER);
518         if (!stab)
519                 return ERR_PTR(-ENOMEM);
520 
521         /* mandatory map attributes */
522         stab->map.map_type = attr->map_type;
523         stab->map.key_size = attr->key_size;
524         stab->map.value_size = attr->value_size;
525         stab->map.max_entries = attr->max_entries;
526         stab->map.map_flags = attr->map_flags;
527         stab->map.numa_node = bpf_map_attr_numa_node(attr);
528 
529         /* make sure page count doesn't overflow */
530         cost = (u64) stab->map.max_entries * sizeof(struct sock *);
531         if (cost >= U32_MAX - PAGE_SIZE)
532                 goto free_stab;
533 
534         stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
535 
536         /* if map size is larger than memlock limit, reject it early */
537         err = bpf_map_precharge_memlock(stab->map.pages);
538         if (err)
539                 goto free_stab;
540 
541         err = -ENOMEM;
542         stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
543                                             sizeof(struct sock *),
544                                             stab->map.numa_node);
545         if (!stab->sock_map)
546                 goto free_stab;
547 
548         return &stab->map;
549 free_stab:
550         kfree(stab);
551         return ERR_PTR(err);
552 }
553 
554 static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
555 {
556         struct smap_psock_map_entry *e, *tmp;
557 
558         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
559                 if (e->entry == entry) {
560                         list_del(&e->list);
561                         break;
562                 }
563         }
564 }
565 
566 static void sock_map_free(struct bpf_map *map)
567 {
568         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
569         int i;
570 
571         synchronize_rcu();
572 
573         /* At this point no update, lookup or delete operations can happen.
574          * However, be aware we can still get socket state event updates
575          * and data ready callbacks that reference the psock from sk_user_data.
576          * Also psock worker threads are still in-flight. So smap_release_sock
577          * will only free the psock after cancel_sync on the worker threads
578          * and a grace period expires to ensure psock is really safe to remove.
579          */
580         rcu_read_lock();
581         for (i = 0; i < stab->map.max_entries; i++) {
582                 struct smap_psock *psock;
583                 struct sock *sock;
584 
585                 sock = xchg(&stab->sock_map[i], NULL);
586                 if (!sock)
587                         continue;
588 
589                 write_lock_bh(&sock->sk_callback_lock);
590                 psock = smap_psock_sk(sock);
591                 smap_list_remove(psock, &stab->sock_map[i]);
592                 smap_release_sock(psock, sock);
593                 write_unlock_bh(&sock->sk_callback_lock);
594         }
595         rcu_read_unlock();
596 
597         if (stab->bpf_verdict)
598                 bpf_prog_put(stab->bpf_verdict);
599         if (stab->bpf_parse)
600                 bpf_prog_put(stab->bpf_parse);
601 
602         sock_map_remove_complete(stab);
603 }
604 
605 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
606 {
607         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
608         u32 i = key ? *(u32 *)key : U32_MAX;
609         u32 *next = (u32 *)next_key;
610 
611         if (i >= stab->map.max_entries) {
612                 *next = 0;
613                 return 0;
614         }
615 
616         if (i == stab->map.max_entries - 1)
617                 return -ENOENT;
618 
619         *next = i + 1;
620         return 0;
621 }
622 
623 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
624 {
625         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
626 
627         if (key >= map->max_entries)
628                 return NULL;
629 
630         return READ_ONCE(stab->sock_map[key]);
631 }
632 
633 static int sock_map_delete_elem(struct bpf_map *map, void *key)
634 {
635         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
636         struct smap_psock *psock;
637         int k = *(u32 *)key;
638         struct sock *sock;
639 
640         if (k >= map->max_entries)
641                 return -EINVAL;
642 
643         sock = xchg(&stab->sock_map[k], NULL);
644         if (!sock)
645                 return -EINVAL;
646 
647         write_lock_bh(&sock->sk_callback_lock);
648         psock = smap_psock_sk(sock);
649         if (!psock)
650                 goto out;
651 
652         if (psock->bpf_parse)
653                 smap_stop_sock(psock, sock);
654         smap_list_remove(psock, &stab->sock_map[k]);
655         smap_release_sock(psock, sock);
656 out:
657         write_unlock_bh(&sock->sk_callback_lock);
658         return 0;
659 }
660 
661 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
662  * done inside rcu critical sections. This ensures on updates that the psock
663  * will not be released via smap_release_sock() until concurrent updates/deletes
664  * complete. All operations operate on sock_map using cmpxchg and xchg
665  * operations to ensure we do not get stale references. Any reads into the
666  * map must be done with READ_ONCE() because of this.
667  *
668  * A psock is destroyed via call_rcu and after any worker threads are cancelled
669  * and synced so we are certain all references from the update/lookup/delete
670  * operations as well as references in the data path are no longer in use.
671  *
672  * Psocks may exist in multiple maps, but only a single set of parse/verdict
673  * programs may be inherited from the maps it belongs to. A reference count
674  * is kept with the total number of references to the psock from all maps. The
675  * psock will not be released until this reaches zero. The psock and sock
676  * user data use the sk_callback_lock to protect critical data structures
677  * from concurrent access. This allows us to avoid two updates modifying
678  * the user data in sock at the same time; since the lock is required anyway
679  * for modifying callbacks, we simply increase its scope slightly.
680  *
681  * Rules to follow,
682  *  - psock must always be read inside RCU critical section
683  *  - sk_user_data must only be modified inside sk_callback_lock and read
684  *    inside RCU critical section.
685  *  - psock->maps list must only be read & modified inside sk_callback_lock
686  *  - sock_map must use READ_ONCE and (cmp)xchg operations
687  *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
688  */
689 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
690                                     struct bpf_map *map,
691                                     void *key, u64 flags)
692 {
693         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
694         struct smap_psock_map_entry *e = NULL;
695         struct bpf_prog *verdict, *parse;
696         struct sock *osock, *sock;
697         struct smap_psock *psock;
698         u32 i = *(u32 *)key;
699         int err;
700 
701         if (unlikely(flags > BPF_EXIST))
702                 return -EINVAL;
703 
704         if (unlikely(i >= stab->map.max_entries))
705                 return -E2BIG;
706 
707         sock = READ_ONCE(stab->sock_map[i]);
708         if (flags == BPF_EXIST && !sock)
709                 return -ENOENT;
710         else if (flags == BPF_NOEXIST && sock)
711                 return -EEXIST;
712 
713         sock = skops->sk;
714 
715         /* 1. If the sock map has BPF programs, those will be inherited by the
716          * sock being added. If the sock is already attached to BPF programs
717          * this results in an error.
718          */
719         verdict = READ_ONCE(stab->bpf_verdict);
720         parse = READ_ONCE(stab->bpf_parse);
721 
722         if (parse && verdict) {
723                 /* bpf prog refcnt may be zero if a concurrent attach operation
724                  * removes the program after the above READ_ONCE() but before
725                  * we increment the refcnt. If this is the case abort with an
726                  * error.
727                  */
728                 verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
729                 if (IS_ERR(verdict))
730                         return PTR_ERR(verdict);
731 
732                 parse = bpf_prog_inc_not_zero(stab->bpf_parse);
733                 if (IS_ERR(parse)) {
734                         bpf_prog_put(verdict);
735                         return PTR_ERR(parse);
736                 }
737         }
738 
739         write_lock_bh(&sock->sk_callback_lock);
740         psock = smap_psock_sk(sock);
741 
742         /* 2. Do not allow inheriting programs if psock exists and has
743          * already inherited programs. This would create confusion on
744          * which parser/verdict program is running. If no psock exists
745          * create one. Inside sk_callback_lock to ensure concurrent create
746          * doesn't update user data.
747          */
748         if (psock) {
749                 if (READ_ONCE(psock->bpf_parse) && parse) {
750                         err = -EBUSY;
751                         goto out_progs;
752                 }
753                 psock->refcnt++;
754         } else {
755                 psock = smap_init_psock(sock, stab);
756                 if (IS_ERR(psock)) {
757                         err = PTR_ERR(psock);
758                         goto out_progs;
759                 }
760 
761                 set_bit(SMAP_TX_RUNNING, &psock->state);
762         }
763 
764         e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
765         if (!e) {
766                 err = -ENOMEM;
767                 goto out_progs;
768         }
769         e->entry = &stab->sock_map[i];
770 
771         /* 3. At this point we have a reference to a valid psock that is
772          * running. Attach any BPF programs needed.
773          */
774         if (parse && verdict && !psock->strp_enabled) {
775                 err = smap_init_sock(psock, sock);
776                 if (err)
777                         goto out_free;
778                 smap_init_progs(psock, stab, verdict, parse);
779                 smap_start_sock(psock, sock);
780         }
781 
782         /* 4. Place psock in sockmap for use and stop any programs on
783          * the old sock, assuming it's not the same sock we are replacing
784          * it with. Because we can only have a single set of programs, if
785          * old_sock has a strp we can stop it.
786          */
787         list_add_tail(&e->list, &psock->maps);
788         write_unlock_bh(&sock->sk_callback_lock);
789 
790         osock = xchg(&stab->sock_map[i], sock);
791         if (osock) {
792                 struct smap_psock *opsock = smap_psock_sk(osock);
793 
794                 write_lock_bh(&osock->sk_callback_lock);
795                 if (osock != sock && parse)
796                         smap_stop_sock(opsock, osock);
797                 smap_list_remove(opsock, &stab->sock_map[i]);
798                 smap_release_sock(opsock, osock);
799                 write_unlock_bh(&osock->sk_callback_lock);
800         }
801         return 0;
802 out_free:
803         smap_release_sock(psock, sock);
804 out_progs:
805         if (verdict)
806                 bpf_prog_put(verdict);
807         if (parse)
808                 bpf_prog_put(parse);
809         write_unlock_bh(&sock->sk_callback_lock);
810         kfree(e);
811         return err;
812 }
813 
814 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
815 {
816         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
817         struct bpf_prog *orig;
818 
819         if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
820                 return -EINVAL;
821 
822         switch (type) {
823         case BPF_SK_SKB_STREAM_PARSER:
824                 orig = xchg(&stab->bpf_parse, prog);
825                 break;
826         case BPF_SK_SKB_STREAM_VERDICT:
827                 orig = xchg(&stab->bpf_verdict, prog);
828                 break;
829         default:
830                 return -EOPNOTSUPP;
831         }
832 
833         if (orig)
834                 bpf_prog_put(orig);
835 
836         return 0;
837 }
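
For context, sock_map_prog() is reached from the bpf(2) BPF_PROG_ATTACH command with the sockmap fd as the attach target. A rough user-space sketch, assuming the uapi bpf_attr field names and omitting error handling:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Attach an already-loaded sk_skb program (prog_fd) to the sockmap (map_fd)
 * as either BPF_SK_SKB_STREAM_PARSER or BPF_SK_SKB_STREAM_VERDICT.
 */
static int sockmap_attach_prog(int map_fd, int prog_fd, enum bpf_attach_type type)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = map_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = type;
        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}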
838 
839 static void *sock_map_lookup(struct bpf_map *map, void *key)
840 {
841         return NULL;
842 }
843 
844 static int sock_map_update_elem(struct bpf_map *map,
845                                 void *key, void *value, u64 flags)
846 {
847         struct bpf_sock_ops_kern skops;
848         u32 fd = *(u32 *)value;
849         struct socket *socket;
850         int err;
851 
852         socket = sockfd_lookup(fd, &err);
853         if (!socket)
854                 return err;
855 
856         skops.sk = socket->sk;
857         if (!skops.sk) {
858                 fput(socket->file);
859                 return -EINVAL;
860         }
861 
862         if (skops.sk->sk_type != SOCK_STREAM ||
863             skops.sk->sk_protocol != IPPROTO_TCP) {
864                 fput(socket->file);
865                 return -EOPNOTSUPP;
866         }
867 
868         err = sock_map_ctx_update_elem(&skops, map, key, flags);
869         fput(socket->file);
870         return err;
871 }
872 
873 const struct bpf_map_ops sock_map_ops = {
874         .map_alloc = sock_map_alloc,
875         .map_free = sock_map_free,
876         .map_lookup_elem = sock_map_lookup,
877         .map_get_next_key = sock_map_get_next_key,
878         .map_update_elem = sock_map_update_elem,
879         .map_delete_elem = sock_map_delete_elem,
880 };
881 
882 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
883            struct bpf_map *, map, void *, key, u64, flags)
884 {
885         WARN_ON_ONCE(!rcu_read_lock_held());
886         return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
887 }
888 
889 const struct bpf_func_proto bpf_sock_map_update_proto = {
890         .func           = bpf_sock_map_update,
891         .gpl_only       = false,
892         .pkt_access     = true,
893         .ret_type       = RET_INTEGER,
894         .arg1_type      = ARG_PTR_TO_CTX,
895         .arg2_type      = ARG_CONST_MAP_PTR,
896         .arg3_type      = ARG_PTR_TO_MAP_KEY,
897         .arg4_type      = ARG_ANYTHING,
898 };
899 
