TOMOYO Linux Cross Reference
Linux/kernel/bpf/sockmap.c


  1 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  2  *
  3  * This program is free software; you can redistribute it and/or
  4  * modify it under the terms of version 2 of the GNU General Public
  5  * License as published by the Free Software Foundation.
  6  *
  7  * This program is distributed in the hope that it will be useful, but
  8  * WITHOUT ANY WARRANTY; without even the implied warranty of
  9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 10  * General Public License for more details.
 11  */
 12 
 13 /* A BPF sock_map is used to store sock objects. This is primarily used
 14  * for doing socket redirect with BPF helper routines.
 15  *
 16  * A sock map may have BPF programs attached to it; currently a program
 17  * used to parse packets and a program to provide a verdict and redirect
 18  * decision on the packet are supported. Any programs attached to a sock
 19  * map are inherited by sock objects when they are added to the map. If
 20  * no BPF programs are attached, the sock object may only be used for sock
 21  * redirect.
 22  *
 23  * A sock object may be in multiple maps, but can only inherit a single
 24  * parse or verdict program. If adding a sock object to a map would result
 25  * in having multiple parsing programs, the update will return an EBUSY error.
 26  *
 27  * For reference, this program is similar to the devmap used in the XDP
 28  * context; reviewing these together may be useful. For an example, please
 29  * review ./samples/bpf/sockmap/.
 30  */
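
/* Illustrative user-space sketch (not part of this file): create a sockmap
 * and add an already-established TCP socket at index 0. The helper names
 * (bpf_create_map(), bpf_map_update_elem()) are assumed from tools/lib/bpf
 * of this era; see ./samples/bpf/sockmap/ for the authoritative usage.
 *
 *   #include "bpf/bpf.h"        // tools/lib/bpf
 *
 *   static int setup_sockmap(int established_tcp_fd)
 *   {
 *           int map_fd, key = 0, err;
 *
 *           map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
 *                                   sizeof(int), 1024, 0);
 *           if (map_fd < 0)
 *                   return map_fd;
 *
 *           // the value written is a socket fd; sock_map_update_elem()
 *           // below resolves it to the underlying struct sock
 *           err = bpf_map_update_elem(map_fd, &key, &established_tcp_fd,
 *                                     BPF_ANY);
 *           return err ? err : map_fd;
 *   }
 */
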
 31 #include <linux/bpf.h>
 32 #include <net/sock.h>
 33 #include <linux/filter.h>
 34 #include <linux/errno.h>
 35 #include <linux/file.h>
 36 #include <linux/kernel.h>
 37 #include <linux/net.h>
 38 #include <linux/skbuff.h>
 39 #include <linux/workqueue.h>
 40 #include <linux/list.h>
 41 #include <net/strparser.h>
 42 #include <net/tcp.h>
 43 
 44 #define SOCK_CREATE_FLAG_MASK \
 45         (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 46 
 47 struct bpf_stab {
 48         struct bpf_map map;
 49         struct sock **sock_map;
 50         struct bpf_prog *bpf_parse;
 51         struct bpf_prog *bpf_verdict;
 52 };
 53 
 54 enum smap_psock_state {
 55         SMAP_TX_RUNNING,
 56 };
 57 
 58 struct smap_psock_map_entry {
 59         struct list_head list;
 60         struct sock **entry;
 61 };
 62 
 63 struct smap_psock {
 64         struct rcu_head rcu;
 65         /* refcnt is used inside sk_callback_lock */
 66         u32 refcnt;
 67 
 68         /* datapath variables */
 69         struct sk_buff_head rxqueue;
 70         bool strp_enabled;
 71 
 72         /* datapath error path cache across tx work invocations */
 73         int save_rem;
 74         int save_off;
 75         struct sk_buff *save_skb;
 76 
 77         struct strparser strp;
 78         struct bpf_prog *bpf_parse;
 79         struct bpf_prog *bpf_verdict;
 80         struct list_head maps;
 81 
 82         /* Back reference used when sock callbacks trigger sockmap operations */
 83         struct sock *sock;
 84         unsigned long state;
 85 
 86         struct work_struct tx_work;
 87         struct work_struct gc_work;
 88 
 89         void (*save_data_ready)(struct sock *sk);
 90         void (*save_write_space)(struct sock *sk);
 91         void (*save_state_change)(struct sock *sk);
 92 };
 93 
 94 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 95 {
 96         return rcu_dereference_sk_user_data(sk);
 97 }
 98 
 99 /* compute the linear packet data range [data, data_end) for skb when
100  * sk_skb type programs are in use.
101  */
102 static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
103 {
104         TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
105 }
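
/* A minimal sketch of how an sk_skb parser program would consume the
 * [data, data_end) range computed above. The 4-byte length header, section
 * name and includes (the usual samples/bpf linux/bpf.h + bpf_helpers.h) are
 * illustrative assumptions, not part of this file:
 *
 *   SEC("sk_skb_parse")
 *   int parse_prog(struct __sk_buff *skb)
 *   {
 *           void *data = (void *)(long)skb->data;
 *           void *data_end = (void *)(long)skb->data_end;
 *           __u32 *len = data;
 *
 *           if ((void *)(len + 1) > data_end)
 *                   return 0;       // not enough data yet, wait for more
 *
 *           return *len;            // full length of the next message
 *   }
 */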
106 
107 enum __sk_action {
108         __SK_DROP = 0,
109         __SK_PASS,
110         __SK_REDIRECT,
111 };
112 
113 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
114 {
115         struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
116         int rc;
117 
118         if (unlikely(!prog))
119                 return __SK_DROP;
120 
121         skb_orphan(skb);
122         /* We need to ensure that BPF metadata for maps is also cleared
123          * when we orphan the skb, so that there is no possibility of
124          * referencing a stale map.
125          */
126         TCP_SKB_CB(skb)->bpf.map = NULL;
127         skb->sk = psock->sock;
128         bpf_compute_data_pointers(skb);
129         preempt_disable();
130         rc = (*prog->bpf_func)(skb, prog->insnsi);
131         preempt_enable();
132         skb->sk = NULL;
133 
134         /* Moving return codes from UAPI namespace into internal namespace */
135         return rc == SK_PASS ?
136                 (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
137                 __SK_DROP;
138 }
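
/* Sketch of a matching sk_skb verdict program. bpf_sk_redirect_map() records
 * the target map/slot in the skb's BPF metadata and returns SK_PASS, which
 * smap_verdict_func() above turns into __SK_REDIRECT; SK_PASS without a
 * recorded map becomes __SK_PASS, and anything else is a drop. The map
 * definition and the helper signature (as of ~4.15) are assumptions:
 *
 *   struct bpf_map_def SEC("maps") sock_map = {
 *           .type        = BPF_MAP_TYPE_SOCKMAP,
 *           .key_size    = sizeof(int),
 *           .value_size  = sizeof(int),
 *           .max_entries = 1024,
 *   };
 *
 *   SEC("sk_skb_verdict")
 *   int verdict_prog(struct __sk_buff *skb)
 *   {
 *           // redirect every parsed message to the socket in slot 0
 *           return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *   }
 */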
139 
140 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
141 {
142         struct sock *sk;
143         int rc;
144 
145         rc = smap_verdict_func(psock, skb);
146         switch (rc) {
147         case __SK_REDIRECT:
148                 sk = do_sk_redirect_map(skb);
149                 if (likely(sk)) {
150                         struct smap_psock *peer = smap_psock_sk(sk);
151 
152                         if (likely(peer &&
153                                    test_bit(SMAP_TX_RUNNING, &peer->state) &&
154                                    !sock_flag(sk, SOCK_DEAD) &&
155                                    sock_writeable(sk))) {
156                                 skb_set_owner_w(skb, sk);
157                                 skb_queue_tail(&peer->rxqueue, skb);
158                                 schedule_work(&peer->tx_work);
159                                 break;
160                         }
161                 }
162         /* Fall through and free skb otherwise */
163         case __SK_DROP:
164         default:
165                 kfree_skb(skb);
166         }
167 }
168 
169 static void smap_report_sk_error(struct smap_psock *psock, int err)
170 {
171         struct sock *sk = psock->sock;
172 
173         sk->sk_err = err;
174         sk->sk_error_report(sk);
175 }
176 
177 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
178 
179 /* Called with lock_sock(sk) held */
180 static void smap_state_change(struct sock *sk)
181 {
182         struct smap_psock_map_entry *e, *tmp;
183         struct smap_psock *psock;
184         struct socket_wq *wq;
185         struct sock *osk;
186 
187         rcu_read_lock();
188 
189         /* Allowing transitions into the established and syn_recv states
190          * allows sockets to be bound to a smap object early, before the
191          * connection is established.
192          */
193         switch (sk->sk_state) {
194         case TCP_SYN_SENT:
195         case TCP_SYN_RECV:
196         case TCP_ESTABLISHED:
197                 break;
198         case TCP_CLOSE_WAIT:
199         case TCP_CLOSING:
200         case TCP_LAST_ACK:
201         case TCP_FIN_WAIT1:
202         case TCP_FIN_WAIT2:
203         case TCP_LISTEN:
204                 break;
205         case TCP_CLOSE:
206                 /* Only release if the map entry is in fact the sock in
207                  * question. There is a case where the operator deletes
208                  * the sock from the map, but the TCP sock is closed before
209                  * the psock is detached. Use cmpxchg to verify correct
210                  * sock is removed.
211                  */
212                 psock = smap_psock_sk(sk);
213                 if (unlikely(!psock))
214                         break;
215                 write_lock_bh(&sk->sk_callback_lock);
216                 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
217                         osk = cmpxchg(e->entry, sk, NULL);
218                         if (osk == sk) {
219                                 list_del(&e->list);
220                                 smap_release_sock(psock, sk);
221                         }
222                 }
223                 write_unlock_bh(&sk->sk_callback_lock);
224                 break;
225         default:
226                 psock = smap_psock_sk(sk);
227                 if (unlikely(!psock))
228                         break;
229                 smap_report_sk_error(psock, EPIPE);
230                 break;
231         }
232 
233         wq = rcu_dereference(sk->sk_wq);
234         if (skwq_has_sleeper(wq))
235                 wake_up_interruptible_all(&wq->wait);
236         rcu_read_unlock();
237 }
238 
239 static void smap_read_sock_strparser(struct strparser *strp,
240                                      struct sk_buff *skb)
241 {
242         struct smap_psock *psock;
243 
244         rcu_read_lock();
245         psock = container_of(strp, struct smap_psock, strp);
246         smap_do_verdict(psock, skb);
247         rcu_read_unlock();
248 }
249 
250 /* Called with lock held on socket */
251 static void smap_data_ready(struct sock *sk)
252 {
253         struct smap_psock *psock;
254 
255         rcu_read_lock();
256         psock = smap_psock_sk(sk);
257         if (likely(psock)) {
258                 write_lock_bh(&sk->sk_callback_lock);
259                 strp_data_ready(&psock->strp);
260                 write_unlock_bh(&sk->sk_callback_lock);
261         }
262         rcu_read_unlock();
263 }
264 
265 static void smap_tx_work(struct work_struct *w)
266 {
267         struct smap_psock *psock;
268         struct sk_buff *skb;
269         int rem, off, n;
270 
271         psock = container_of(w, struct smap_psock, tx_work);
272 
273         /* lock sock to avoid losing sk_socket at some point during the loop */
274         lock_sock(psock->sock);
275         if (psock->save_skb) {
276                 skb = psock->save_skb;
277                 rem = psock->save_rem;
278                 off = psock->save_off;
279                 psock->save_skb = NULL;
280                 goto start;
281         }
282 
283         while ((skb = skb_dequeue(&psock->rxqueue))) {
284                 rem = skb->len;
285                 off = 0;
286 start:
287                 do {
288                         if (likely(psock->sock->sk_socket))
289                                 n = skb_send_sock_locked(psock->sock,
290                                                          skb, off, rem);
291                         else
292                                 n = -EINVAL;
293                         if (n <= 0) {
294                                 if (n == -EAGAIN) {
295                                         /* Retry when space is available */
296                                         psock->save_skb = skb;
297                                         psock->save_rem = rem;
298                                         psock->save_off = off;
299                                         goto out;
300                                 }
301                                 /* Hard errors break pipe and stop xmit */
302                                 smap_report_sk_error(psock, n ? -n : EPIPE);
303                                 clear_bit(SMAP_TX_RUNNING, &psock->state);
304                                 kfree_skb(skb);
305                                 goto out;
306                         }
307                         rem -= n;
308                         off += n;
309                 } while (rem);
310                 kfree_skb(skb);
311         }
312 out:
313         release_sock(psock->sock);
314 }
315 
316 static void smap_write_space(struct sock *sk)
317 {
318         struct smap_psock *psock;
319 
320         rcu_read_lock();
321         psock = smap_psock_sk(sk);
322         if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
323                 schedule_work(&psock->tx_work);
324         rcu_read_unlock();
325 }
326 
327 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
328 {
329         if (!psock->strp_enabled)
330                 return;
331         sk->sk_data_ready = psock->save_data_ready;
332         sk->sk_write_space = psock->save_write_space;
333         sk->sk_state_change = psock->save_state_change;
334         psock->save_data_ready = NULL;
335         psock->save_write_space = NULL;
336         psock->save_state_change = NULL;
337         strp_stop(&psock->strp);
338         psock->strp_enabled = false;
339 }
340 
341 static void smap_destroy_psock(struct rcu_head *rcu)
342 {
343         struct smap_psock *psock = container_of(rcu,
344                                                   struct smap_psock, rcu);
345 
346         /* Now that a grace period has passed, there is no longer
347          * any reference to this sock in the sockmap, so we can
348          * destroy the psock, strparser, and bpf programs. But,
349          * because we use workqueue sync operations, we cannot
350          * do it in rcu context.
351          */
352         schedule_work(&psock->gc_work);
353 }
354 
355 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
356 {
357         psock->refcnt--;
358         if (psock->refcnt)
359                 return;
360 
361         smap_stop_sock(psock, sock);
362         clear_bit(SMAP_TX_RUNNING, &psock->state);
363         rcu_assign_sk_user_data(sock, NULL);
364         call_rcu_sched(&psock->rcu, smap_destroy_psock);
365 }
366 
367 static int smap_parse_func_strparser(struct strparser *strp,
368                                        struct sk_buff *skb)
369 {
370         struct smap_psock *psock;
371         struct bpf_prog *prog;
372         int rc;
373 
374         rcu_read_lock();
375         psock = container_of(strp, struct smap_psock, strp);
376         prog = READ_ONCE(psock->bpf_parse);
377 
378         if (unlikely(!prog)) {
379                 rcu_read_unlock();
380                 return skb->len;
381         }
382 
383         /* Attach the socket for the bpf program to use if needed. We can
384          * do this because strparser clones the skb before handing it to an
385          * upper layer, meaning skb_orphan has been called. We NULL sk on the
386          * way out to ensure we don't trigger a BUG_ON in skb/sk operations
387          * later, and because we are not charging the memory of this skb to
388          * any socket yet.
389          */
390         skb->sk = psock->sock;
391         bpf_compute_data_pointers(skb);
392         rc = (*prog->bpf_func)(skb, prog->insnsi);
393         skb->sk = NULL;
394         rcu_read_unlock();
395         return rc;
396 }
397 
398 
399 static int smap_read_sock_done(struct strparser *strp, int err)
400 {
401         return err;
402 }
403 
404 static int smap_init_sock(struct smap_psock *psock,
405                           struct sock *sk)
406 {
407         static const struct strp_callbacks cb = {
408                 .rcv_msg = smap_read_sock_strparser,
409                 .parse_msg = smap_parse_func_strparser,
410                 .read_sock_done = smap_read_sock_done,
411         };
412 
413         return strp_init(&psock->strp, sk, &cb);
414 }
415 
416 static void smap_init_progs(struct smap_psock *psock,
417                             struct bpf_stab *stab,
418                             struct bpf_prog *verdict,
419                             struct bpf_prog *parse)
420 {
421         struct bpf_prog *orig_parse, *orig_verdict;
422 
423         orig_parse = xchg(&psock->bpf_parse, parse);
424         orig_verdict = xchg(&psock->bpf_verdict, verdict);
425 
426         if (orig_verdict)
427                 bpf_prog_put(orig_verdict);
428         if (orig_parse)
429                 bpf_prog_put(orig_parse);
430 }
431 
432 static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
433 {
434         if (sk->sk_data_ready == smap_data_ready)
435                 return;
436         psock->save_data_ready = sk->sk_data_ready;
437         psock->save_write_space = sk->sk_write_space;
438         psock->save_state_change = sk->sk_state_change;
439         sk->sk_data_ready = smap_data_ready;
440         sk->sk_write_space = smap_write_space;
441         sk->sk_state_change = smap_state_change;
442         psock->strp_enabled = true;
443 }
444 
445 static void sock_map_remove_complete(struct bpf_stab *stab)
446 {
447         bpf_map_area_free(stab->sock_map);
448         kfree(stab);
449 }
450 
451 static void smap_gc_work(struct work_struct *w)
452 {
453         struct smap_psock_map_entry *e, *tmp;
454         struct smap_psock *psock;
455 
456         psock = container_of(w, struct smap_psock, gc_work);
457 
458         /* no callback lock needed because we already detached sockmap ops */
459         if (psock->strp_enabled)
460                 strp_done(&psock->strp);
461 
462         cancel_work_sync(&psock->tx_work);
463         __skb_queue_purge(&psock->rxqueue);
464 
465         /* At this point all strparser and xmit work must be complete */
466         if (psock->bpf_parse)
467                 bpf_prog_put(psock->bpf_parse);
468         if (psock->bpf_verdict)
469                 bpf_prog_put(psock->bpf_verdict);
470 
471         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
472                 list_del(&e->list);
473                 kfree(e);
474         }
475 
476         sock_put(psock->sock);
477         kfree(psock);
478 }
479 
480 static struct smap_psock *smap_init_psock(struct sock *sock,
481                                           struct bpf_stab *stab)
482 {
483         struct smap_psock *psock;
484 
485         psock = kzalloc_node(sizeof(struct smap_psock),
486                              GFP_ATOMIC | __GFP_NOWARN,
487                              stab->map.numa_node);
488         if (!psock)
489                 return ERR_PTR(-ENOMEM);
490 
491         psock->sock = sock;
492         skb_queue_head_init(&psock->rxqueue);
493         INIT_WORK(&psock->tx_work, smap_tx_work);
494         INIT_WORK(&psock->gc_work, smap_gc_work);
495         INIT_LIST_HEAD(&psock->maps);
496         psock->refcnt = 1;
497 
498         rcu_assign_sk_user_data(sock, psock);
499         sock_hold(sock);
500         return psock;
501 }
502 
503 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
504 {
505         struct bpf_stab *stab;
506         int err = -EINVAL;
507         u64 cost;
508 
509         if (!capable(CAP_NET_ADMIN))
510                 return ERR_PTR(-EPERM);
511 
512         /* check sanity of attributes */
513         if (attr->max_entries == 0 || attr->key_size != 4 ||
514             attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
515                 return ERR_PTR(-EINVAL);
516 
517         if (attr->value_size > KMALLOC_MAX_SIZE)
518                 return ERR_PTR(-E2BIG);
519 
520         stab = kzalloc(sizeof(*stab), GFP_USER);
521         if (!stab)
522                 return ERR_PTR(-ENOMEM);
523 
524         /* mandatory map attributes */
525         stab->map.map_type = attr->map_type;
526         stab->map.key_size = attr->key_size;
527         stab->map.value_size = attr->value_size;
528         stab->map.max_entries = attr->max_entries;
529         stab->map.map_flags = attr->map_flags;
530         stab->map.numa_node = bpf_map_attr_numa_node(attr);
531 
532         /* make sure page count doesn't overflow */
533         cost = (u64) stab->map.max_entries * sizeof(struct sock *);
534         if (cost >= U32_MAX - PAGE_SIZE)
535                 goto free_stab;
536 
537         stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
538 
539         /* if map size is larger than memlock limit, reject it early */
540         err = bpf_map_precharge_memlock(stab->map.pages);
541         if (err)
542                 goto free_stab;
543 
544         err = -ENOMEM;
545         stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
546                                             sizeof(struct sock *),
547                                             stab->map.numa_node);
548         if (!stab->sock_map)
549                 goto free_stab;
550 
551         return &stab->map;
552 free_stab:
553         kfree(stab);
554         return ERR_PTR(err);
555 }
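
/* Worked example of the sizing above, assuming 8-byte pointers and 4 KiB
 * pages: max_entries = 1024 gives cost = 1024 * sizeof(struct sock *) =
 * 8192 bytes, so map.pages = round_up(8192, 4096) >> PAGE_SHIFT = 2 pages
 * are charged against the memlock limit before the array is allocated.
 */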
556 
557 static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
558 {
559         struct smap_psock_map_entry *e, *tmp;
560 
561         list_for_each_entry_safe(e, tmp, &psock->maps, list) {
562                 if (e->entry == entry) {
563                         list_del(&e->list);
564                         break;
565                 }
566         }
567 }
568 
569 static void sock_map_free(struct bpf_map *map)
570 {
571         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
572         int i;
573 
574         synchronize_rcu();
575 
576         /* At this point no update, lookup or delete operations can happen.
577          * However, be aware we can still get socket state event updates
578          * and data ready callbacks that reference the psock from sk_user_data.
579          * Also, psock worker threads are still in-flight. So smap_release_sock
580          * will only free the psock after cancel_sync on the worker threads
581          * and a grace period expires, to ensure the psock is really safe to remove.
582          */
583         rcu_read_lock();
584         for (i = 0; i < stab->map.max_entries; i++) {
585                 struct smap_psock *psock;
586                 struct sock *sock;
587 
588                 sock = xchg(&stab->sock_map[i], NULL);
589                 if (!sock)
590                         continue;
591 
592                 write_lock_bh(&sock->sk_callback_lock);
593                 psock = smap_psock_sk(sock);
594                 /* This check handles a racing sock event that can get the
595                  * sk_callback_lock before this case but after the xchg
596                  * happens, causing the refcnt to hit zero and the sock user
597                  * data (psock) to be NULL and queued for garbage collection.
598                  */
599                 if (likely(psock)) {
600                         smap_list_remove(psock, &stab->sock_map[i]);
601                         smap_release_sock(psock, sock);
602                 }
603                 write_unlock_bh(&sock->sk_callback_lock);
604         }
605         rcu_read_unlock();
606 
607         if (stab->bpf_verdict)
608                 bpf_prog_put(stab->bpf_verdict);
609         if (stab->bpf_parse)
610                 bpf_prog_put(stab->bpf_parse);
611 
612         sock_map_remove_complete(stab);
613 }
614 
615 static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
616 {
617         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
618         u32 i = key ? *(u32 *)key : U32_MAX;
619         u32 *next = (u32 *)next_key;
620 
621         if (i >= stab->map.max_entries) {
622                 *next = 0;
623                 return 0;
624         }
625 
626         if (i == stab->map.max_entries - 1)
627                 return -ENOENT;
628 
629         *next = i + 1;
630         return 0;
631 }
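
/* Hedged user-space sketch of walking the map with the semantics above;
 * assumes the tools/lib/bpf helper bpf_map_get_next_key() of this era and
 * that a NULL start key begins the walk at slot 0 (as the map op handles):
 *
 *   static void walk_sockmap(int map_fd)
 *   {
 *           __u32 key, next;
 *
 *           if (bpf_map_get_next_key(map_fd, NULL, &next))
 *                   return;
 *           do {
 *                   key = next;
 *                   // visit slot "key" here; note sock_map_lookup() below
 *                   // returns NULL, so user space cannot read the sock back
 *           } while (!bpf_map_get_next_key(map_fd, &key, &next));
 *   }
 */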
632 
633 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
634 {
635         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
636 
637         if (key >= map->max_entries)
638                 return NULL;
639 
640         return READ_ONCE(stab->sock_map[key]);
641 }
642 
643 static int sock_map_delete_elem(struct bpf_map *map, void *key)
644 {
645         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
646         struct smap_psock *psock;
647         int k = *(u32 *)key;
648         struct sock *sock;
649 
650         if (k >= map->max_entries)
651                 return -EINVAL;
652 
653         sock = xchg(&stab->sock_map[k], NULL);
654         if (!sock)
655                 return -EINVAL;
656 
657         write_lock_bh(&sock->sk_callback_lock);
658         psock = smap_psock_sk(sock);
659         if (!psock)
660                 goto out;
661 
662         if (psock->bpf_parse)
663                 smap_stop_sock(psock, sock);
664         smap_list_remove(psock, &stab->sock_map[k]);
665         smap_release_sock(psock, sock);
666 out:
667         write_unlock_bh(&sock->sk_callback_lock);
668         return 0;
669 }
670 
671 /* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
672  * done inside rcu critical sections. This ensures on updates that the psock
673  * will not be released via smap_release_sock() until concurrent updates/deletes
674  * complete. All operations operate on sock_map using cmpxchg and xchg
675  * operations to ensure we do not get stale references. Any reads into the
676  * map must be done with READ_ONCE() because of this.
677  *
678  * A psock is destroyed via call_rcu and after any worker threads are cancelled
679  * and synced, so we are certain all references from the update/lookup/delete
680  * operations as well as references in the data path are no longer in use.
681  *
682  * A psock may exist in multiple maps, but only a single set of parse/verdict
683  * programs may be inherited from the maps it belongs to. A reference count
684  * is kept with the total number of references to the psock from all maps. The
685  * psock will not be released until this reaches zero. The psock and sock
686  * user data use the sk_callback_lock to protect critical data structures
687  * from concurrent access. This prevents two updates from modifying the user
688  * data in the sock at the same time; the lock is required anyway for
689  * modifying callbacks, so we simply increase its scope slightly.
690  *
691  * Rules to follow:
692  *  - psock must always be read inside RCU critical section
693  *  - sk_user_data must only be modified inside sk_callback_lock and read
694  *    inside RCU critical section.
695  *  - psock->maps list must only be read & modified inside sk_callback_lock
696  *  - sock_map must use READ_ONCE and (cmp)xchg operations
697  *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
698  */
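
/* The rules above in miniature (hypothetical reader, for illustration only):
 *
 *   struct smap_psock *psock;
 *   struct sock *sock;
 *   bool empty = false;
 *
 *   rcu_read_lock();
 *   sock = READ_ONCE(stab->sock_map[i]);              // READ_ONCE on sock_map
 *   if (sock) {
 *           write_lock_bh(&sock->sk_callback_lock);
 *           psock = smap_psock_sk(sock);              // sk_user_data read in RCU
 *           if (psock)
 *                   empty = list_empty(&psock->maps); // maps list under the lock
 *           write_unlock_bh(&sock->sk_callback_lock);
 *   }
 *   rcu_read_unlock();
 */
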
699 static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
700                                     struct bpf_map *map,
701                                     void *key, u64 flags)
702 {
703         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
704         struct smap_psock_map_entry *e = NULL;
705         struct bpf_prog *verdict, *parse;
706         struct sock *osock, *sock;
707         struct smap_psock *psock;
708         u32 i = *(u32 *)key;
709         int err;
710 
711         if (unlikely(flags > BPF_EXIST))
712                 return -EINVAL;
713 
714         if (unlikely(i >= stab->map.max_entries))
715                 return -E2BIG;
716 
717         sock = READ_ONCE(stab->sock_map[i]);
718         if (flags == BPF_EXIST && !sock)
719                 return -ENOENT;
720         else if (flags == BPF_NOEXIST && sock)
721                 return -EEXIST;
722 
723         sock = skops->sk;
724 
725         /* 1. If the sock map has BPF programs, those will be inherited by
726          * the sock being added. If the sock is already attached to BPF
727          * programs, this results in an error.
728          */
729         verdict = READ_ONCE(stab->bpf_verdict);
730         parse = READ_ONCE(stab->bpf_parse);
731 
732         if (parse && verdict) {
733                 /* bpf prog refcnt may be zero if a concurrent attach operation
734                  * removes the program after the above READ_ONCE() but before
735                  * we increment the refcnt. If this is the case, abort with
736                  * an error.
737                  */
738                 verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
739                 if (IS_ERR(verdict))
740                         return PTR_ERR(verdict);
741 
742                 parse = bpf_prog_inc_not_zero(stab->bpf_parse);
743                 if (IS_ERR(parse)) {
744                         bpf_prog_put(verdict);
745                         return PTR_ERR(parse);
746                 }
747         }
748 
749         write_lock_bh(&sock->sk_callback_lock);
750         psock = smap_psock_sk(sock);
751 
752         /* 2. Do not allow inheriting programs if a psock exists and has
753          * already inherited programs. This would create confusion about
754          * which parser/verdict program is running. If no psock exists,
755          * create one. This is done inside sk_callback_lock to ensure a
756          * concurrent create doesn't update the user data.
757          */
758         if (psock) {
759                 if (READ_ONCE(psock->bpf_parse) && parse) {
760                         err = -EBUSY;
761                         goto out_progs;
762                 }
763                 psock->refcnt++;
764         } else {
765                 psock = smap_init_psock(sock, stab);
766                 if (IS_ERR(psock)) {
767                         err = PTR_ERR(psock);
768                         goto out_progs;
769                 }
770 
771                 set_bit(SMAP_TX_RUNNING, &psock->state);
772         }
773 
774         e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
775         if (!e) {
776                 err = -ENOMEM;
777                 goto out_progs;
778         }
779         e->entry = &stab->sock_map[i];
780 
781         /* 3. At this point we have a reference to a valid psock that is
782          * running. Attach any BPF programs needed.
783          */
784         if (parse && verdict && !psock->strp_enabled) {
785                 err = smap_init_sock(psock, sock);
786                 if (err)
787                         goto out_free;
788                 smap_init_progs(psock, stab, verdict, parse);
789                 smap_start_sock(psock, sock);
790         }
791 
792         /* 4. Place the psock in the sockmap for use and stop any programs
793          * on the old sock, assuming it is not the same sock we are replacing
794          * it with. Because we can only have a single set of programs, if
795          * old_sock has a strp we can stop it.
796          */
797         list_add_tail(&e->list, &psock->maps);
798         write_unlock_bh(&sock->sk_callback_lock);
799 
800         osock = xchg(&stab->sock_map[i], sock);
801         if (osock) {
802                 struct smap_psock *opsock = smap_psock_sk(osock);
803 
804                 write_lock_bh(&osock->sk_callback_lock);
805                 if (osock != sock && parse)
806                         smap_stop_sock(opsock, osock);
807                 smap_list_remove(opsock, &stab->sock_map[i]);
808                 smap_release_sock(opsock, osock);
809                 write_unlock_bh(&osock->sk_callback_lock);
810         }
811         return 0;
812 out_free:
813         smap_release_sock(psock, sock);
814 out_progs:
815         if (verdict)
816                 bpf_prog_put(verdict);
817         if (parse)
818                 bpf_prog_put(parse);
819         write_unlock_bh(&sock->sk_callback_lock);
820         kfree(e);
821         return err;
822 }
823 
824 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
825 {
826         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
827         struct bpf_prog *orig;
828 
829         if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
830                 return -EINVAL;
831 
832         switch (type) {
833         case BPF_SK_SKB_STREAM_PARSER:
834                 orig = xchg(&stab->bpf_parse, prog);
835                 break;
836         case BPF_SK_SKB_STREAM_VERDICT:
837                 orig = xchg(&stab->bpf_verdict, prog);
838                 break;
839         default:
840                 return -EOPNOTSUPP;
841         }
842 
843         if (orig)
844                 bpf_prog_put(orig);
845 
846         return 0;
847 }
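
/* From user space the parse and verdict programs are attached with the
 * BPF_PROG_ATTACH command, using the sockmap fd (not a cgroup fd) as the
 * attach target. A sketch assuming the tools/lib/bpf helper of this era,
 * given already-loaded program fds:
 *
 *   static int attach_progs(int map_fd, int parse_prog_fd, int verdict_prog_fd)
 *   {
 *           int err;
 *
 *           err = bpf_prog_attach(parse_prog_fd, map_fd,
 *                                 BPF_SK_SKB_STREAM_PARSER, 0);
 *           if (err)
 *                   return err;
 *           return bpf_prog_attach(verdict_prog_fd, map_fd,
 *                                  BPF_SK_SKB_STREAM_VERDICT, 0);
 *   }
 */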
848 
849 static void *sock_map_lookup(struct bpf_map *map, void *key)
850 {
851         return NULL;
852 }
853 
854 static int sock_map_update_elem(struct bpf_map *map,
855                                 void *key, void *value, u64 flags)
856 {
857         struct bpf_sock_ops_kern skops;
858         u32 fd = *(u32 *)value;
859         struct socket *socket;
860         int err;
861 
862         socket = sockfd_lookup(fd, &err);
863         if (!socket)
864                 return err;
865 
866         skops.sk = socket->sk;
867         if (!skops.sk) {
868                 fput(socket->file);
869                 return -EINVAL;
870         }
871 
872         if (skops.sk->sk_type != SOCK_STREAM ||
873             skops.sk->sk_protocol != IPPROTO_TCP) {
874                 fput(socket->file);
875                 return -EOPNOTSUPP;
876         }
877 
878         err = sock_map_ctx_update_elem(&skops, map, key, flags);
879         fput(socket->file);
880         return err;
881 }
882 
883 const struct bpf_map_ops sock_map_ops = {
884         .map_alloc = sock_map_alloc,
885         .map_free = sock_map_free,
886         .map_lookup_elem = sock_map_lookup,
887         .map_get_next_key = sock_map_get_next_key,
888         .map_update_elem = sock_map_update_elem,
889         .map_delete_elem = sock_map_delete_elem,
890 };
891 
892 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
893            struct bpf_map *, map, void *, key, u64, flags)
894 {
895         WARN_ON_ONCE(!rcu_read_lock_held());
896         return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
897 }
898 
899 const struct bpf_func_proto bpf_sock_map_update_proto = {
900         .func           = bpf_sock_map_update,
901         .gpl_only       = false,
902         .pkt_access     = true,
903         .ret_type       = RET_INTEGER,
904         .arg1_type      = ARG_PTR_TO_CTX,
905         .arg2_type      = ARG_CONST_MAP_PTR,
906         .arg3_type      = ARG_PTR_TO_MAP_KEY,
907         .arg4_type      = ARG_ANYTHING,
908 };
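
/* bpf_sock_map_update() is the BPF-side counterpart of the update path above
 * and is callable from BPF_PROG_TYPE_SOCK_OPS programs. A hedged sketch, with
 * the section name assumed as in ./samples/bpf/sockmap/ of this era and
 * sock_map as defined in the verdict sketch earlier in this file:
 *
 *   SEC("sockops")
 *   int bpf_sockmap(struct bpf_sock_ops *skops)
 *   {
 *           __u32 key = 0;
 *
 *           switch (skops->op) {
 *           case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *           case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *                   // add the newly established socket at index 0
 *                   bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *                   break;
 *           }
 *           return 0;
 *   }
 */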
909 
