
TOMOYO Linux Cross Reference
Linux/net/netlink/af_netlink.c

  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*
  3  * NETLINK      Kernel-user communication protocol.
  4  *
  5  *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
  6  *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  7  *                              Patrick McHardy <kaber@trash.net>
  8  *
  9  * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 10  *                               added netlink_proto_exit
 11  * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 12  *                               use nlk_sk, as sk->protinfo is on a diet 8)
 13  * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 14  *                               - inc module use count of module that owns
 15  *                                 the kernel socket in case userspace opens
 16  *                                 socket of same protocol
 17  *                               - remove all module support, since netlink is
 18  *                                 mandatory if CONFIG_NET=y these days
 19  */
 20 
 21 #include <linux/module.h>
 22 
 23 #include <linux/capability.h>
 24 #include <linux/kernel.h>
 25 #include <linux/init.h>
 26 #include <linux/signal.h>
 27 #include <linux/sched.h>
 28 #include <linux/errno.h>
 29 #include <linux/string.h>
 30 #include <linux/stat.h>
 31 #include <linux/socket.h>
 32 #include <linux/un.h>
 33 #include <linux/fcntl.h>
 34 #include <linux/termios.h>
 35 #include <linux/sockios.h>
 36 #include <linux/net.h>
 37 #include <linux/fs.h>
 38 #include <linux/slab.h>
 39 #include <linux/uaccess.h>
 40 #include <linux/skbuff.h>
 41 #include <linux/netdevice.h>
 42 #include <linux/rtnetlink.h>
 43 #include <linux/proc_fs.h>
 44 #include <linux/seq_file.h>
 45 #include <linux/notifier.h>
 46 #include <linux/security.h>
 47 #include <linux/jhash.h>
 48 #include <linux/jiffies.h>
 49 #include <linux/random.h>
 50 #include <linux/bitops.h>
 51 #include <linux/mm.h>
 52 #include <linux/types.h>
 53 #include <linux/audit.h>
 54 #include <linux/mutex.h>
 55 #include <linux/vmalloc.h>
 56 #include <linux/if_arp.h>
 57 #include <linux/rhashtable.h>
 58 #include <asm/cacheflush.h>
 59 #include <linux/hash.h>
 60 #include <linux/genetlink.h>
 61 #include <linux/net_namespace.h>
 62 #include <linux/nospec.h>
 63 #include <linux/btf_ids.h>
 64 
 65 #include <net/net_namespace.h>
 66 #include <net/netns/generic.h>
 67 #include <net/sock.h>
 68 #include <net/scm.h>
 69 #include <net/netlink.h>
 70 #define CREATE_TRACE_POINTS
 71 #include <trace/events/netlink.h>
 72 
 73 #include "af_netlink.h"
 74 
 75 struct listeners {
 76         struct rcu_head         rcu;
 77         unsigned long           masks[];
 78 };
 79 
 80 /* state bits */
 81 #define NETLINK_S_CONGESTED             0x0
 82 
 83 static inline int netlink_is_kernel(struct sock *sk)
 84 {
 85         return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
 86 }
 87 
 88 struct netlink_table *nl_table __read_mostly;
 89 EXPORT_SYMBOL_GPL(nl_table);
 90 
 91 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 92 
 93 static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
 94 
 95 static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
 96         "nlk_cb_mutex-ROUTE",
 97         "nlk_cb_mutex-1",
 98         "nlk_cb_mutex-USERSOCK",
 99         "nlk_cb_mutex-FIREWALL",
100         "nlk_cb_mutex-SOCK_DIAG",
101         "nlk_cb_mutex-NFLOG",
102         "nlk_cb_mutex-XFRM",
103         "nlk_cb_mutex-SELINUX",
104         "nlk_cb_mutex-ISCSI",
105         "nlk_cb_mutex-AUDIT",
106         "nlk_cb_mutex-FIB_LOOKUP",
107         "nlk_cb_mutex-CONNECTOR",
108         "nlk_cb_mutex-NETFILTER",
109         "nlk_cb_mutex-IP6_FW",
110         "nlk_cb_mutex-DNRTMSG",
111         "nlk_cb_mutex-KOBJECT_UEVENT",
112         "nlk_cb_mutex-GENERIC",
113         "nlk_cb_mutex-17",
114         "nlk_cb_mutex-SCSITRANSPORT",
115         "nlk_cb_mutex-ECRYPTFS",
116         "nlk_cb_mutex-RDMA",
117         "nlk_cb_mutex-CRYPTO",
118         "nlk_cb_mutex-SMC",
119         "nlk_cb_mutex-23",
120         "nlk_cb_mutex-24",
121         "nlk_cb_mutex-25",
122         "nlk_cb_mutex-26",
123         "nlk_cb_mutex-27",
124         "nlk_cb_mutex-28",
125         "nlk_cb_mutex-29",
126         "nlk_cb_mutex-30",
127         "nlk_cb_mutex-31",
128         "nlk_cb_mutex-MAX_LINKS"
129 };
130 
131 static int netlink_dump(struct sock *sk);
132 
133 /* nl_table locking explained:
134  * Lookup and traversal are protected with an RCU read-side lock. Insertion
 135  * and removal are protected with a per-bucket lock while using RCU list
 136  * modification primitives and may run in parallel to RCU-protected lookups.
 137  * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 138  * been acquired, either during or after the socket has been removed from
 139  * the list, and after an RCU grace period.
140  */
141 DEFINE_RWLOCK(nl_table_lock);
142 EXPORT_SYMBOL_GPL(nl_table_lock);
143 static atomic_t nl_table_users = ATOMIC_INIT(0);
144 
145 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
146 
147 static BLOCKING_NOTIFIER_HEAD(netlink_chain);
148 
149 
150 static const struct rhashtable_params netlink_rhashtable_params;
151 
152 void do_trace_netlink_extack(const char *msg)
153 {
154         trace_netlink_extack(msg);
155 }
156 EXPORT_SYMBOL(do_trace_netlink_extack);
157 
158 static inline u32 netlink_group_mask(u32 group)
159 {
160         return group ? 1 << (group - 1) : 0;
161 }
162 
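     /* Make a flat copy of a netlink skb whose data lives in vmalloc space
      * (see netlink_alloc_large_skb()), preserving portid, dst_group and
      * creds from the control block, so it can be handed to a tap device.
      */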
163 static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
164                                            gfp_t gfp_mask)
165 {
166         unsigned int len = skb_end_offset(skb);
167         struct sk_buff *new;
168 
169         new = alloc_skb(len, gfp_mask);
170         if (new == NULL)
171                 return NULL;
172 
173         NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
174         NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
175         NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
176 
177         skb_put_data(new, skb->data, len);
178         return new;
179 }
180 
181 static unsigned int netlink_tap_net_id;
182 
183 struct netlink_tap_net {
184         struct list_head netlink_tap_all;
185         struct mutex netlink_tap_lock;
186 };
187 
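     /* Register a netlink tap device (it must be of type ARPHRD_NETLINK,
      * e.g. one created by the nlmon driver) on its network namespace's tap
      * list and pin the module that owns it.  Taps receive a copy of most
      * netlink traffic via netlink_deliver_tap().
      */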
188 int netlink_add_tap(struct netlink_tap *nt)
189 {
190         struct net *net = dev_net(nt->dev);
191         struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
192 
193         if (unlikely(nt->dev->type != ARPHRD_NETLINK))
194                 return -EINVAL;
195 
196         mutex_lock(&nn->netlink_tap_lock);
197         list_add_rcu(&nt->list, &nn->netlink_tap_all);
198         mutex_unlock(&nn->netlink_tap_lock);
199 
200         __module_get(nt->module);
201 
202         return 0;
203 }
204 EXPORT_SYMBOL_GPL(netlink_add_tap);
205 
206 static int __netlink_remove_tap(struct netlink_tap *nt)
207 {
208         struct net *net = dev_net(nt->dev);
209         struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
210         bool found = false;
211         struct netlink_tap *tmp;
212 
213         mutex_lock(&nn->netlink_tap_lock);
214 
215         list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
216                 if (nt == tmp) {
217                         list_del_rcu(&nt->list);
218                         found = true;
219                         goto out;
220                 }
221         }
222 
223         pr_warn("__netlink_remove_tap: %p not found\n", nt);
224 out:
225         mutex_unlock(&nn->netlink_tap_lock);
226 
227         if (found)
228                 module_put(nt->module);
229 
230         return found ? 0 : -ENODEV;
231 }
232 
233 int netlink_remove_tap(struct netlink_tap *nt)
234 {
235         int ret;
236 
237         ret = __netlink_remove_tap(nt);
238         synchronize_net();
239 
240         return ret;
241 }
242 EXPORT_SYMBOL_GPL(netlink_remove_tap);
243 
244 static __net_init int netlink_tap_init_net(struct net *net)
245 {
246         struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
247 
248         INIT_LIST_HEAD(&nn->netlink_tap_all);
249         mutex_init(&nn->netlink_tap_lock);
250         return 0;
251 }
252 
253 static struct pernet_operations netlink_tap_net_ops = {
254         .init = netlink_tap_init_net,
255         .id   = &netlink_tap_net_id,
256         .size = sizeof(struct netlink_tap_net),
257 };
258 
259 static bool netlink_filter_tap(const struct sk_buff *skb)
260 {
261         struct sock *sk = skb->sk;
262 
263         /* We take the more conservative approach and
264          * whitelist socket protocols that may pass.
265          */
266         switch (sk->sk_protocol) {
267         case NETLINK_ROUTE:
268         case NETLINK_USERSOCK:
269         case NETLINK_SOCK_DIAG:
270         case NETLINK_NFLOG:
271         case NETLINK_XFRM:
272         case NETLINK_FIB_LOOKUP:
273         case NETLINK_NETFILTER:
274         case NETLINK_GENERIC:
275                 return true;
276         }
277 
278         return false;
279 }
280 
281 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
282                                      struct net_device *dev)
283 {
284         struct sk_buff *nskb;
285         struct sock *sk = skb->sk;
286         int ret = -ENOMEM;
287 
288         if (!net_eq(dev_net(dev), sock_net(sk)))
289                 return 0;
290 
291         dev_hold(dev);
292 
293         if (is_vmalloc_addr(skb->head))
294                 nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
295         else
296                 nskb = skb_clone(skb, GFP_ATOMIC);
297         if (nskb) {
298                 nskb->dev = dev;
299                 nskb->protocol = htons((u16) sk->sk_protocol);
300                 nskb->pkt_type = netlink_is_kernel(sk) ?
301                                  PACKET_KERNEL : PACKET_USER;
302                 skb_reset_network_header(nskb);
303                 ret = dev_queue_xmit(nskb);
304                 if (unlikely(ret > 0))
305                         ret = net_xmit_errno(ret);
306         }
307 
308         dev_put(dev);
309         return ret;
310 }
311 
312 static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
313 {
314         int ret;
315         struct netlink_tap *tmp;
316 
317         if (!netlink_filter_tap(skb))
318                 return;
319 
320         list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
321                 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
322                 if (unlikely(ret))
323                         break;
324         }
325 }
326 
327 static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
328 {
329         struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
330 
331         rcu_read_lock();
332 
333         if (unlikely(!list_empty(&nn->netlink_tap_all)))
334                 __netlink_deliver_tap(skb, nn);
335 
336         rcu_read_unlock();
337 }
338 
339 static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
340                                        struct sk_buff *skb)
341 {
342         if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
343                 netlink_deliver_tap(sock_net(dst), skb);
344 }
345 
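     /* Receive-queue overrun: unless the socket opted out via
      * NETLINK_NO_ENOBUFS, report ENOBUFS once per congestion period and
      * always account the dropped packet.
      */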
346 static void netlink_overrun(struct sock *sk)
347 {
348         struct netlink_sock *nlk = nlk_sk(sk);
349 
350         if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
351                 if (!test_and_set_bit(NETLINK_S_CONGESTED,
352                                       &nlk_sk(sk)->state)) {
353                         sk->sk_err = ENOBUFS;
354                         sk_error_report(sk);
355                 }
356         }
357         atomic_inc(&sk->sk_drops);
358 }
359 
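     /* Called after the receiver dequeued data: clear the congested state
      * once the receive queue is empty and wake up senders sleeping in
      * netlink_attachskb().
      */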
360 static void netlink_rcv_wake(struct sock *sk)
361 {
362         struct netlink_sock *nlk = nlk_sk(sk);
363 
364         if (skb_queue_empty_lockless(&sk->sk_receive_queue))
365                 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
366         if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
367                 wake_up_interruptible(&nlk->wait);
368 }
369 
370 static void netlink_skb_destructor(struct sk_buff *skb)
371 {
372         if (is_vmalloc_addr(skb->head)) {
373                 if (!skb->cloned ||
374                     !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
375                         vfree(skb->head);
376 
377                 skb->head = NULL;
378         }
379         if (skb->sk != NULL)
380                 sock_rfree(skb);
381 }
382 
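     /* Charge @skb to @sk's receive-memory accounting; variant of
      * skb_set_owner_r() that uses the vmalloc-aware destructor above.
      */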
383 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
384 {
385         WARN_ON(skb->sk != NULL);
386         skb->sk = sk;
387         skb->destructor = netlink_skb_destructor;
388         atomic_add(skb->truesize, &sk->sk_rmem_alloc);
389         sk_mem_charge(sk, skb->truesize);
390 }
391 
392 static void netlink_sock_destruct(struct sock *sk)
393 {
394         struct netlink_sock *nlk = nlk_sk(sk);
395 
396         if (nlk->cb_running) {
397                 if (nlk->cb.done)
398                         nlk->cb.done(&nlk->cb);
399                 module_put(nlk->cb.module);
400                 kfree_skb(nlk->cb.skb);
401         }
402 
403         skb_queue_purge(&sk->sk_receive_queue);
404 
405         if (!sock_flag(sk, SOCK_DEAD)) {
406                 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
407                 return;
408         }
409 
410         WARN_ON(atomic_read(&sk->sk_rmem_alloc));
411         WARN_ON(refcount_read(&sk->sk_wmem_alloc));
412         WARN_ON(nlk_sk(sk)->groups);
413 }
414 
415 static void netlink_sock_destruct_work(struct work_struct *work)
416 {
417         struct netlink_sock *nlk = container_of(work, struct netlink_sock,
418                                                 work);
419 
420         sk_free(&nlk->sk);
421 }
422 
423 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 424  * SMP: when several writers sleep and a reader wakes them up, all but one
 425  * immediately hit the write lock and grab all the CPUs. Exclusive sleep
 426  * solves this, _but_ remember that it adds useless work on UP machines.
427  */
428 
429 void netlink_table_grab(void)
430         __acquires(nl_table_lock)
431 {
432         might_sleep();
433 
434         write_lock_irq(&nl_table_lock);
435 
436         if (atomic_read(&nl_table_users)) {
437                 DECLARE_WAITQUEUE(wait, current);
438 
439                 add_wait_queue_exclusive(&nl_table_wait, &wait);
440                 for (;;) {
441                         set_current_state(TASK_UNINTERRUPTIBLE);
442                         if (atomic_read(&nl_table_users) == 0)
443                                 break;
444                         write_unlock_irq(&nl_table_lock);
445                         schedule();
446                         write_lock_irq(&nl_table_lock);
447                 }
448 
449                 __set_current_state(TASK_RUNNING);
450                 remove_wait_queue(&nl_table_wait, &wait);
451         }
452 }
453 
454 void netlink_table_ungrab(void)
455         __releases(nl_table_lock)
456 {
457         write_unlock_irq(&nl_table_lock);
458         wake_up(&nl_table_wait);
459 }
460 
461 static inline void
462 netlink_lock_table(void)
463 {
464         unsigned long flags;
465 
 466         /* read_lock() synchronizes us with netlink_table_grab() */
467 
468         read_lock_irqsave(&nl_table_lock, flags);
469         atomic_inc(&nl_table_users);
470         read_unlock_irqrestore(&nl_table_lock, flags);
471 }
472 
473 static inline void
474 netlink_unlock_table(void)
475 {
476         if (atomic_dec_and_test(&nl_table_users))
477                 wake_up(&nl_table_wait);
478 }
479 
480 struct netlink_compare_arg
481 {
482         possible_net_t pnet;
483         u32 portid;
484 };
485 
486 /* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
487 #define netlink_compare_arg_len \
488         (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
489 
490 static inline int netlink_compare(struct rhashtable_compare_arg *arg,
491                                   const void *ptr)
492 {
493         const struct netlink_compare_arg *x = arg->key;
494         const struct netlink_sock *nlk = ptr;
495 
496         return nlk->portid != x->portid ||
497                !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
498 }
499 
500 static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
501                                      struct net *net, u32 portid)
502 {
503         memset(arg, 0, sizeof(*arg));
504         write_pnet(&arg->pnet, net);
505         arg->portid = portid;
506 }
507 
508 static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
509                                      struct net *net)
510 {
511         struct netlink_compare_arg arg;
512 
513         netlink_compare_arg_init(&arg, net, portid);
514         return rhashtable_lookup_fast(&table->hash, &arg,
515                                       netlink_rhashtable_params);
516 }
517 
518 static int __netlink_insert(struct netlink_table *table, struct sock *sk)
519 {
520         struct netlink_compare_arg arg;
521 
522         netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
523         return rhashtable_lookup_insert_key(&table->hash, &arg,
524                                             &nlk_sk(sk)->node,
525                                             netlink_rhashtable_params);
526 }
527 
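     /* Find the socket bound to @portid for @protocol in @net and take a
      * reference on it; the caller must drop it with sock_put().
      */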
528 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
529 {
530         struct netlink_table *table = &nl_table[protocol];
531         struct sock *sk;
532 
533         rcu_read_lock();
534         sk = __netlink_lookup(table, portid, net);
535         if (sk)
536                 sock_hold(sk);
537         rcu_read_unlock();
538 
539         return sk;
540 }
541 
542 static const struct proto_ops netlink_ops;
543 
544 static void
545 netlink_update_listeners(struct sock *sk)
546 {
547         struct netlink_table *tbl = &nl_table[sk->sk_protocol];
548         unsigned long mask;
549         unsigned int i;
550         struct listeners *listeners;
551 
552         listeners = nl_deref_protected(tbl->listeners);
553         if (!listeners)
554                 return;
555 
556         for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
557                 mask = 0;
558                 sk_for_each_bound(sk, &tbl->mc_list) {
559                         if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
560                                 mask |= nlk_sk(sk)->groups[i];
561                 }
562                 listeners->masks[i] = mask;
563         }
564         /* this function is only called with the netlink table "grabbed", which
565          * makes sure updates are visible before bind or setsockopt return. */
566 }
567 
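     /* Bind @portid to @sk and insert the socket into the per-protocol hash.
      * Returns -EBUSY if @sk is already bound to a different portid and
      * -EADDRINUSE if @portid is already taken in this namespace.
      */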
568 static int netlink_insert(struct sock *sk, u32 portid)
569 {
570         struct netlink_table *table = &nl_table[sk->sk_protocol];
571         int err;
572 
573         lock_sock(sk);
574 
575         err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
576         if (nlk_sk(sk)->bound)
577                 goto err;
578 
579         nlk_sk(sk)->portid = portid;
580         sock_hold(sk);
581 
582         err = __netlink_insert(table, sk);
583         if (err) {
 584                 /* If the hashtable backend returns -EBUSY here, it must
 585                  * not escape to the caller.
 586                  */
587                 if (unlikely(err == -EBUSY))
588                         err = -EOVERFLOW;
589                 if (err == -EEXIST)
590                         err = -EADDRINUSE;
591                 sock_put(sk);
592                 goto err;
593         }
594 
595         /* We need to ensure that the socket is hashed and visible. */
596         smp_wmb();
597         /* Paired with lockless reads from netlink_bind(),
598          * netlink_connect() and netlink_sendmsg().
599          */
600         WRITE_ONCE(nlk_sk(sk)->bound, portid);
601 
602 err:
603         release_sock(sk);
604         return err;
605 }
606 
607 static void netlink_remove(struct sock *sk)
608 {
609         struct netlink_table *table;
610 
611         table = &nl_table[sk->sk_protocol];
612         if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
613                                     netlink_rhashtable_params)) {
614                 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
615                 __sock_put(sk);
616         }
617 
618         netlink_table_grab();
619         if (nlk_sk(sk)->subscriptions) {
620                 __sk_del_bind_node(sk);
621                 netlink_update_listeners(sk);
622         }
623         if (sk->sk_protocol == NETLINK_GENERIC)
624                 atomic_inc(&genl_sk_destructing_cnt);
625         netlink_table_ungrab();
626 }
627 
628 static struct proto netlink_proto = {
629         .name     = "NETLINK",
630         .owner    = THIS_MODULE,
631         .obj_size = sizeof(struct netlink_sock),
632 };
633 
634 static int __netlink_create(struct net *net, struct socket *sock,
635                             struct mutex *cb_mutex, int protocol,
636                             int kern)
637 {
638         struct sock *sk;
639         struct netlink_sock *nlk;
640 
641         sock->ops = &netlink_ops;
642 
643         sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
644         if (!sk)
645                 return -ENOMEM;
646 
647         sock_init_data(sock, sk);
648 
649         nlk = nlk_sk(sk);
650         if (cb_mutex) {
651                 nlk->cb_mutex = cb_mutex;
652         } else {
653                 nlk->cb_mutex = &nlk->cb_def_mutex;
654                 mutex_init(nlk->cb_mutex);
655                 lockdep_set_class_and_name(nlk->cb_mutex,
656                                            nlk_cb_mutex_keys + protocol,
657                                            nlk_cb_mutex_key_strings[protocol]);
658         }
659         init_waitqueue_head(&nlk->wait);
660 
661         sk->sk_destruct = netlink_sock_destruct;
662         sk->sk_protocol = protocol;
663         return 0;
664 }
665 
666 static int netlink_create(struct net *net, struct socket *sock, int protocol,
667                           int kern)
668 {
669         struct module *module = NULL;
670         struct mutex *cb_mutex;
671         struct netlink_sock *nlk;
672         int (*bind)(struct net *net, int group);
673         void (*unbind)(struct net *net, int group);
674         int err = 0;
675 
676         sock->state = SS_UNCONNECTED;
677 
678         if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
679                 return -ESOCKTNOSUPPORT;
680 
681         if (protocol < 0 || protocol >= MAX_LINKS)
682                 return -EPROTONOSUPPORT;
683         protocol = array_index_nospec(protocol, MAX_LINKS);
684 
685         netlink_lock_table();
686 #ifdef CONFIG_MODULES
687         if (!nl_table[protocol].registered) {
688                 netlink_unlock_table();
689                 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
690                 netlink_lock_table();
691         }
692 #endif
693         if (nl_table[protocol].registered &&
694             try_module_get(nl_table[protocol].module))
695                 module = nl_table[protocol].module;
696         else
697                 err = -EPROTONOSUPPORT;
698         cb_mutex = nl_table[protocol].cb_mutex;
699         bind = nl_table[protocol].bind;
700         unbind = nl_table[protocol].unbind;
701         netlink_unlock_table();
702 
703         if (err < 0)
704                 goto out;
705 
706         err = __netlink_create(net, sock, cb_mutex, protocol, kern);
707         if (err < 0)
708                 goto out_module;
709 
710         local_bh_disable();
711         sock_prot_inuse_add(net, &netlink_proto, 1);
712         local_bh_enable();
713 
714         nlk = nlk_sk(sock->sk);
715         nlk->module = module;
716         nlk->netlink_bind = bind;
717         nlk->netlink_unbind = unbind;
718 out:
719         return err;
720 
721 out_module:
722         module_put(module);
723         goto out;
724 }
725 
726 static void deferred_put_nlk_sk(struct rcu_head *head)
727 {
728         struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
729         struct sock *sk = &nlk->sk;
730 
731         kfree(nlk->groups);
732         nlk->groups = NULL;
733 
734         if (!refcount_dec_and_test(&sk->sk_refcnt))
735                 return;
736 
737         if (nlk->cb_running && nlk->cb.done) {
738                 INIT_WORK(&nlk->work, netlink_sock_destruct_work);
739                 schedule_work(&nlk->work);
740                 return;
741         }
742 
743         sk_free(sk);
744 }
745 
746 static int netlink_release(struct socket *sock)
747 {
748         struct sock *sk = sock->sk;
749         struct netlink_sock *nlk;
750 
751         if (!sk)
752                 return 0;
753 
754         netlink_remove(sk);
755         sock_orphan(sk);
756         nlk = nlk_sk(sk);
757 
758         /*
759          * OK. Socket is unlinked, any packets that arrive now
760          * will be purged.
761          */
762 
 763         /* netlink_table_lock must not be acquired again in any way before
 764          * unbinding and notifying genetlink are done, as that might deadlock.
 765          */
766         if (nlk->netlink_unbind) {
767                 int i;
768 
769                 for (i = 0; i < nlk->ngroups; i++)
770                         if (test_bit(i, nlk->groups))
771                                 nlk->netlink_unbind(sock_net(sk), i + 1);
772         }
773         if (sk->sk_protocol == NETLINK_GENERIC &&
774             atomic_dec_return(&genl_sk_destructing_cnt) == 0)
775                 wake_up(&genl_sk_destructing_waitq);
776 
777         sock->sk = NULL;
778         wake_up_interruptible_all(&nlk->wait);
779 
780         skb_queue_purge(&sk->sk_write_queue);
781 
782         if (nlk->portid && nlk->bound) {
783                 struct netlink_notify n = {
784                                                 .net = sock_net(sk),
785                                                 .protocol = sk->sk_protocol,
786                                                 .portid = nlk->portid,
787                                           };
788                 blocking_notifier_call_chain(&netlink_chain,
789                                 NETLINK_URELEASE, &n);
790         }
791 
792         module_put(nlk->module);
793 
794         if (netlink_is_kernel(sk)) {
795                 netlink_table_grab();
796                 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
797                 if (--nl_table[sk->sk_protocol].registered == 0) {
798                         struct listeners *old;
799 
800                         old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
801                         RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
802                         kfree_rcu(old, rcu);
803                         nl_table[sk->sk_protocol].module = NULL;
804                         nl_table[sk->sk_protocol].bind = NULL;
805                         nl_table[sk->sk_protocol].unbind = NULL;
806                         nl_table[sk->sk_protocol].flags = 0;
807                         nl_table[sk->sk_protocol].registered = 0;
808                 }
809                 netlink_table_ungrab();
810         }
811 
812         local_bh_disable();
813         sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
814         local_bh_enable();
815         call_rcu(&nlk->rcu, deferred_put_nlk_sk);
816         return 0;
817 }
818 
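     /* Choose a portid for a socket that was not bound to an explicit nl_pid:
      * try the thread group id first and, on collision, probe random negative
      * values until a free one is found.
      */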
819 static int netlink_autobind(struct socket *sock)
820 {
821         struct sock *sk = sock->sk;
822         struct net *net = sock_net(sk);
823         struct netlink_table *table = &nl_table[sk->sk_protocol];
824         s32 portid = task_tgid_vnr(current);
825         int err;
826         s32 rover = -4096;
827         bool ok;
828 
829 retry:
830         cond_resched();
831         rcu_read_lock();
832         ok = !__netlink_lookup(table, portid, net);
833         rcu_read_unlock();
834         if (!ok) {
835                 /* Bind collision, search negative portid values. */
836                 if (rover == -4096)
837                         /* rover will be in range [S32_MIN, -4097] */
838                         rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
839                 else if (rover >= -4096)
840                         rover = -4097;
841                 portid = rover--;
842                 goto retry;
843         }
844 
845         err = netlink_insert(sk, portid);
846         if (err == -EADDRINUSE)
847                 goto retry;
848 
849         /* If 2 threads race to autobind, that is fine.  */
850         if (err == -EBUSY)
851                 err = 0;
852 
853         return err;
854 }
855 
856 /**
857  * __netlink_ns_capable - General netlink message capability test
858  * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
859  * @user_ns: The user namespace of the capability to use
860  * @cap: The capability to use
861  *
 862  * Test to see if the opener of the socket we received the message
 863  * from had the capability @cap in the user namespace @user_ns when the
 864  * netlink socket was created, and that the sender of the message has it too.
865  */
866 bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
867                         struct user_namespace *user_ns, int cap)
868 {
869         return ((nsp->flags & NETLINK_SKB_DST) ||
870                 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
871                 ns_capable(user_ns, cap);
872 }
873 EXPORT_SYMBOL(__netlink_ns_capable);
874 
875 /**
876  * netlink_ns_capable - General netlink message capability test
877  * @skb: socket buffer holding a netlink command from userspace
878  * @user_ns: The user namespace of the capability to use
879  * @cap: The capability to use
880  *
 881  * Test to see if the opener of the socket we received the message
 882  * from had the capability @cap in the user namespace @user_ns when the
 883  * netlink socket was created, and that the sender of the message has it too.
884  */
885 bool netlink_ns_capable(const struct sk_buff *skb,
886                         struct user_namespace *user_ns, int cap)
887 {
888         return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
889 }
890 EXPORT_SYMBOL(netlink_ns_capable);
891 
892 /**
893  * netlink_capable - Netlink global message capability test
894  * @skb: socket buffer holding a netlink command from userspace
895  * @cap: The capability to use
896  *
 897  * Test to see if the opener of the socket we received the message
 898  * from had the capability @cap in all user namespaces when the netlink
 899  * socket was created, and that the sender of the message has it too.
900  */
901 bool netlink_capable(const struct sk_buff *skb, int cap)
902 {
903         return netlink_ns_capable(skb, &init_user_ns, cap);
904 }
905 EXPORT_SYMBOL(netlink_capable);
906 
907 /**
908  * netlink_net_capable - Netlink network namespace message capability test
909  * @skb: socket buffer holding a netlink command from userspace
910  * @cap: The capability to use
911  *
 912  * Test to see if the opener of the socket we received the message
 913  * from had the capability @cap over the network namespace of that
 914  * socket when it was created, and that the sender of the message
 915  * has it too.
916  */
917 bool netlink_net_capable(const struct sk_buff *skb, int cap)
918 {
919         return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
920 }
921 EXPORT_SYMBOL(netlink_net_capable);
922 
923 static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
924 {
925         return (nl_table[sock->sk->sk_protocol].flags & flag) ||
926                 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
927 }
928 
929 static void
930 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
931 {
932         struct netlink_sock *nlk = nlk_sk(sk);
933 
934         if (nlk->subscriptions && !subscriptions)
935                 __sk_del_bind_node(sk);
936         else if (!nlk->subscriptions && subscriptions)
937                 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
938         nlk->subscriptions = subscriptions;
939 }
940 
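     /* Grow the socket's multicast group bitmap so it covers every group
      * currently registered for its protocol; group counts can grow at
      * runtime (e.g. when generic netlink families register new groups).
      */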
941 static int netlink_realloc_groups(struct sock *sk)
942 {
943         struct netlink_sock *nlk = nlk_sk(sk);
944         unsigned int groups;
945         unsigned long *new_groups;
946         int err = 0;
947 
948         netlink_table_grab();
949 
950         groups = nl_table[sk->sk_protocol].groups;
951         if (!nl_table[sk->sk_protocol].registered) {
952                 err = -ENOENT;
953                 goto out_unlock;
954         }
955 
956         if (nlk->ngroups >= groups)
957                 goto out_unlock;
958 
959         new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
960         if (new_groups == NULL) {
961                 err = -ENOMEM;
962                 goto out_unlock;
963         }
964         memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
965                NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
966 
967         nlk->groups = new_groups;
968         nlk->ngroups = groups;
969  out_unlock:
970         netlink_table_ungrab();
971         return err;
972 }
973 
974 static void netlink_undo_bind(int group, long unsigned int groups,
975                               struct sock *sk)
976 {
977         struct netlink_sock *nlk = nlk_sk(sk);
978         int undo;
979 
980         if (!nlk->netlink_unbind)
981                 return;
982 
983         for (undo = 0; undo < group; undo++)
984                 if (test_bit(undo, &groups))
985                         nlk->netlink_unbind(sock_net(sk), undo + 1);
986 }
987 
988 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
989                         int addr_len)
990 {
991         struct sock *sk = sock->sk;
992         struct net *net = sock_net(sk);
993         struct netlink_sock *nlk = nlk_sk(sk);
994         struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
995         int err = 0;
996         unsigned long groups;
997         bool bound;
998 
999         if (addr_len < sizeof(struct sockaddr_nl))
1000                 return -EINVAL;
1001 
1002         if (nladdr->nl_family != AF_NETLINK)
1003                 return -EINVAL;
1004         groups = nladdr->nl_groups;
1005 
1006         /* Only superuser is allowed to listen to multicasts */
1007         if (groups) {
1008                 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1009                         return -EPERM;
1010                 err = netlink_realloc_groups(sk);
1011                 if (err)
1012                         return err;
1013         }
1014 
1015         if (nlk->ngroups < BITS_PER_LONG)
1016                 groups &= (1UL << nlk->ngroups) - 1;
1017 
1018         /* Paired with WRITE_ONCE() in netlink_insert() */
1019         bound = READ_ONCE(nlk->bound);
1020         if (bound) {
1021                 /* Ensure nlk->portid is up-to-date. */
1022                 smp_rmb();
1023 
1024                 if (nladdr->nl_pid != nlk->portid)
1025                         return -EINVAL;
1026         }
1027 
1028         if (nlk->netlink_bind && groups) {
1029                 int group;
1030 
1031                 /* nl_groups is a u32, so cap the maximum groups we can bind */
1032                 for (group = 0; group < BITS_PER_TYPE(u32); group++) {
1033                         if (!test_bit(group, &groups))
1034                                 continue;
1035                         err = nlk->netlink_bind(net, group + 1);
1036                         if (!err)
1037                                 continue;
1038                         netlink_undo_bind(group, groups, sk);
1039                         return err;
1040                 }
1041         }
1042 
1043         /* No need for barriers here as we return to user-space without
1044          * using any of the bound attributes.
1045          */
1046         netlink_lock_table();
1047         if (!bound) {
1048                 err = nladdr->nl_pid ?
1049                         netlink_insert(sk, nladdr->nl_pid) :
1050                         netlink_autobind(sock);
1051                 if (err) {
1052                         netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
1053                         goto unlock;
1054                 }
1055         }
1056 
1057         if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1058                 goto unlock;
1059         netlink_unlock_table();
1060 
1061         netlink_table_grab();
1062         netlink_update_subscriptions(sk, nlk->subscriptions +
1063                                          hweight32(groups) -
1064                                          hweight32(nlk->groups[0]));
1065         nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
1066         netlink_update_listeners(sk);
1067         netlink_table_ungrab();
1068 
1069         return 0;
1070 
1071 unlock:
1072         netlink_unlock_table();
1073         return err;
1074 }
1075 
1076 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1077                            int alen, int flags)
1078 {
1079         int err = 0;
1080         struct sock *sk = sock->sk;
1081         struct netlink_sock *nlk = nlk_sk(sk);
1082         struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1083 
1084         if (alen < sizeof(addr->sa_family))
1085                 return -EINVAL;
1086 
1087         if (addr->sa_family == AF_UNSPEC) {
1088                 sk->sk_state    = NETLINK_UNCONNECTED;
1089                 nlk->dst_portid = 0;
1090                 nlk->dst_group  = 0;
1091                 return 0;
1092         }
1093         if (addr->sa_family != AF_NETLINK)
1094                 return -EINVAL;
1095 
1096         if (alen < sizeof(struct sockaddr_nl))
1097                 return -EINVAL;
1098 
1099         if ((nladdr->nl_groups || nladdr->nl_pid) &&
1100             !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1101                 return -EPERM;
1102 
1103         /* No need for barriers here as we return to user-space without
1104          * using any of the bound attributes.
1105          * Paired with WRITE_ONCE() in netlink_insert().
1106          */
1107         if (!READ_ONCE(nlk->bound))
1108                 err = netlink_autobind(sock);
1109 
1110         if (err == 0) {
1111                 sk->sk_state    = NETLINK_CONNECTED;
1112                 nlk->dst_portid = nladdr->nl_pid;
1113                 nlk->dst_group  = ffs(nladdr->nl_groups);
1114         }
1115 
1116         return err;
1117 }
1118 
1119 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1120                            int peer)
1121 {
1122         struct sock *sk = sock->sk;
1123         struct netlink_sock *nlk = nlk_sk(sk);
1124         DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1125 
1126         nladdr->nl_family = AF_NETLINK;
1127         nladdr->nl_pad = 0;
1128 
1129         if (peer) {
1130                 nladdr->nl_pid = nlk->dst_portid;
1131                 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1132         } else {
1133                 nladdr->nl_pid = nlk->portid;
1134                 netlink_lock_table();
1135                 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1136                 netlink_unlock_table();
1137         }
1138         return sizeof(*nladdr);
1139 }
1140 
1141 static int netlink_ioctl(struct socket *sock, unsigned int cmd,
1142                          unsigned long arg)
1143 {
1144         /* try to hand this ioctl down to the NIC drivers.
1145          */
1146         return -ENOIOCTLCMD;
1147 }
1148 
1149 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1150 {
1151         struct sock *sock;
1152         struct netlink_sock *nlk;
1153 
1154         sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1155         if (!sock)
1156                 return ERR_PTR(-ECONNREFUSED);
1157 
1158         /* Don't bother queuing skb if kernel socket has no input function */
1159         nlk = nlk_sk(sock);
1160         if (sock->sk_state == NETLINK_CONNECTED &&
1161             nlk->dst_portid != nlk_sk(ssk)->portid) {
1162                 sock_put(sock);
1163                 return ERR_PTR(-ECONNREFUSED);
1164         }
1165         return sock;
1166 }
1167 
1168 struct sock *netlink_getsockbyfilp(struct file *filp)
1169 {
1170         struct inode *inode = file_inode(filp);
1171         struct sock *sock;
1172 
1173         if (!S_ISSOCK(inode->i_mode))
1174                 return ERR_PTR(-ENOTSOCK);
1175 
1176         sock = SOCKET_I(inode)->sk;
1177         if (sock->sk_family != AF_NETLINK)
1178                 return ERR_PTR(-EINVAL);
1179 
1180         sock_hold(sock);
1181         return sock;
1182 }
1183 
1184 static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1185                                                int broadcast)
1186 {
1187         struct sk_buff *skb;
1188         void *data;
1189 
1190         if (size <= NLMSG_GOODSIZE || broadcast)
1191                 return alloc_skb(size, GFP_KERNEL);
1192 
1193         size = SKB_DATA_ALIGN(size) +
1194                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1195 
1196         data = vmalloc(size);
1197         if (data == NULL)
1198                 return NULL;
1199 
1200         skb = __build_skb(data, size);
1201         if (skb == NULL)
1202                 vfree(data);
1203         else
1204                 skb->destructor = netlink_skb_destructor;
1205 
1206         return skb;
1207 }
1208 
1209 /*
1210  * Attach an skb to a netlink socket.
1211  * The caller must hold a reference to the destination socket. On error, the
1212  * reference is dropped. The skb is not sent to the destination; only the
1213  * error checks are performed and memory in the queue is reserved.
1214  * Return values:
1215  * < 0: error. skb freed, reference to sock dropped.
1216  * 0: continue
1217  * 1: repeat lookup - reference dropped while waiting for socket memory.
1218  */
1219 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1220                       long *timeo, struct sock *ssk)
1221 {
1222         struct netlink_sock *nlk;
1223 
1224         nlk = nlk_sk(sk);
1225 
1226         if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1227              test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
1228                 DECLARE_WAITQUEUE(wait, current);
1229                 if (!*timeo) {
1230                         if (!ssk || netlink_is_kernel(ssk))
1231                                 netlink_overrun(sk);
1232                         sock_put(sk);
1233                         kfree_skb(skb);
1234                         return -EAGAIN;
1235                 }
1236 
1237                 __set_current_state(TASK_INTERRUPTIBLE);
1238                 add_wait_queue(&nlk->wait, &wait);
1239 
1240                 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1241                      test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1242                     !sock_flag(sk, SOCK_DEAD))
1243                         *timeo = schedule_timeout(*timeo);
1244 
1245                 __set_current_state(TASK_RUNNING);
1246                 remove_wait_queue(&nlk->wait, &wait);
1247                 sock_put(sk);
1248 
1249                 if (signal_pending(current)) {
1250                         kfree_skb(skb);
1251                         return sock_intr_errno(*timeo);
1252                 }
1253                 return 1;
1254         }
1255         netlink_skb_set_owner_r(skb, sk);
1256         return 0;
1257 }
1258 
1259 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1260 {
1261         int len = skb->len;
1262 
1263         netlink_deliver_tap(sock_net(sk), skb);
1264 
1265         skb_queue_tail(&sk->sk_receive_queue, skb);
1266         sk->sk_data_ready(sk);
1267         return len;
1268 }
1269 
1270 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1271 {
1272         int len = __netlink_sendskb(sk, skb);
1273 
1274         sock_put(sk);
1275         return len;
1276 }
1277 
1278 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1279 {
1280         kfree_skb(skb);
1281         sock_put(sk);
1282 }
1283 
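     /* Release the excess tail room of an over-allocated skb before it is
      * queued, so that receive-memory accounting reflects the actual message
      * size; shared skbs are cloned first, vmalloc-backed ones are left alone.
      */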
1284 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1285 {
1286         int delta;
1287 
1288         WARN_ON(skb->sk != NULL);
1289         delta = skb->end - skb->tail;
1290         if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1291                 return skb;
1292 
1293         if (skb_shared(skb)) {
1294                 struct sk_buff *nskb = skb_clone(skb, allocation);
1295                 if (!nskb)
1296                         return skb;
1297                 consume_skb(skb);
1298                 skb = nskb;
1299         }
1300 
1301         pskb_expand_head(skb, 0, -delta,
1302                          (allocation & ~__GFP_DIRECT_RECLAIM) |
1303                          __GFP_NOWARN | __GFP_NORETRY);
1304         return skb;
1305 }
1306 
1307 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1308                                   struct sock *ssk)
1309 {
1310         int ret;
1311         struct netlink_sock *nlk = nlk_sk(sk);
1312 
1313         ret = -ECONNREFUSED;
1314         if (nlk->netlink_rcv != NULL) {
1315                 ret = skb->len;
1316                 netlink_skb_set_owner_r(skb, sk);
1317                 NETLINK_CB(skb).sk = ssk;
1318                 netlink_deliver_tap_kernel(sk, ssk, skb);
1319                 nlk->netlink_rcv(skb);
1320                 consume_skb(skb);
1321         } else {
1322                 kfree_skb(skb);
1323         }
1324         sock_put(sk);
1325         return ret;
1326 }
1327 
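     /**
      * netlink_unicast - deliver a message to a single socket
      * @ssk: sending socket (used for namespace, protocol and timeout)
      * @skb: message to deliver; consumed regardless of the outcome
      * @portid: portid of the destination socket
      * @nonblock: if true, fail with -EAGAIN instead of waiting for
      *            receive-queue space
      *
      * Kernel sockets are handed the message synchronously through their
      * netlink_rcv() callback; user sockets get it queued on their receive
      * queue.  Returns the number of bytes delivered or a negative errno.
      */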
1328 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1329                     u32 portid, int nonblock)
1330 {
1331         struct sock *sk;
1332         int err;
1333         long timeo;
1334 
1335         skb = netlink_trim(skb, gfp_any());
1336 
1337         timeo = sock_sndtimeo(ssk, nonblock);
1338 retry:
1339         sk = netlink_getsockbyportid(ssk, portid);
1340         if (IS_ERR(sk)) {
1341                 kfree_skb(skb);
1342                 return PTR_ERR(sk);
1343         }
1344         if (netlink_is_kernel(sk))
1345                 return netlink_unicast_kernel(sk, skb, ssk);
1346 
1347         if (sk_filter(sk, skb)) {
1348                 err = skb->len;
1349                 kfree_skb(skb);
1350                 sock_put(sk);
1351                 return err;
1352         }
1353 
1354         err = netlink_attachskb(sk, skb, &timeo, ssk);
1355         if (err == 1)
1356                 goto retry;
1357         if (err)
1358                 return err;
1359 
1360         return netlink_sendskb(sk, skb);
1361 }
1362 EXPORT_SYMBOL(netlink_unicast);
1363 
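     /* Return non-zero when at least one socket is subscribed to multicast
      * @group of @sk's protocol.  Only valid on kernel sockets; callers
      * typically use it to skip building a notification nobody would receive.
      */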
1364 int netlink_has_listeners(struct sock *sk, unsigned int group)
1365 {
1366         int res = 0;
1367         struct listeners *listeners;
1368 
1369         BUG_ON(!netlink_is_kernel(sk));
1370 
1371         rcu_read_lock();
1372         listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1373 
1374         if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1375                 res = test_bit(group - 1, listeners->masks);
1376 
1377         rcu_read_unlock();
1378 
1379         return res;
1380 }
1381 EXPORT_SYMBOL_GPL(netlink_has_listeners);
1382 
1383 bool netlink_strict_get_check(struct sk_buff *skb)
1384 {
1385         const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
1386 
1387         return nlk->flags & NETLINK_F_STRICT_CHK;
1388 }
1389 EXPORT_SYMBOL_GPL(netlink_strict_get_check);
1390 
1391 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1392 {
1393         struct netlink_sock *nlk = nlk_sk(sk);
1394 
1395         if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1396             !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
1397                 netlink_skb_set_owner_r(skb, sk);
1398                 __netlink_sendskb(sk, skb);
1399                 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1400         }
1401         return -1;
1402 }
1403 
1404 struct netlink_broadcast_data {
1405         struct sock *exclude_sk;
1406         struct net *net;
1407         u32 portid;
1408         u32 group;
1409         int failure;
1410         int delivery_failure;
1411         int congested;
1412         int delivered;
1413         gfp_t allocation;
1414         struct sk_buff *skb, *skb2;
1415         int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1416         void *tx_data;
1417 };
1418 
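     /* Deliver one copy of the broadcast described by @p to @sk, provided the
      * socket is subscribed to the group, may see the sender's namespace and
      * passes the optional filters; congestion and delivery failures are
      * recorded in @p for the caller.
      */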
1419 static void do_one_broadcast(struct sock *sk,
1420                                     struct netlink_broadcast_data *p)
1421 {
1422         struct netlink_sock *nlk = nlk_sk(sk);
1423         int val;
1424 
1425         if (p->exclude_sk == sk)
1426                 return;
1427 
1428         if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1429             !test_bit(p->group - 1, nlk->groups))
1430                 return;
1431 
1432         if (!net_eq(sock_net(sk), p->net)) {
1433                 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
1434                         return;
1435 
1436                 if (!peernet_has_id(sock_net(sk), p->net))
1437                         return;
1438 
1439                 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
1440                                      CAP_NET_BROADCAST))
1441                         return;
1442         }
1443 
1444         if (p->failure) {
1445                 netlink_overrun(sk);
1446                 return;
1447         }
1448 
1449         sock_hold(sk);
1450         if (p->skb2 == NULL) {
1451                 if (skb_shared(p->skb)) {
1452                         p->skb2 = skb_clone(p->skb, p->allocation);
1453                 } else {
1454                         p->skb2 = skb_get(p->skb);
1455                         /*
1456                          * skb ownership may have been set when
1457                          * delivered to a previous socket.
1458                          */
1459                         skb_orphan(p->skb2);
1460                 }
1461         }
1462         if (p->skb2 == NULL) {
1463                 netlink_overrun(sk);
1464                 /* Clone failed. Notify ALL listeners. */
1465                 p->failure = 1;
1466                 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1467                         p->delivery_failure = 1;
1468                 goto out;
1469         }
1470         if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1471                 kfree_skb(p->skb2);
1472                 p->skb2 = NULL;
1473                 goto out;
1474         }
1475         if (sk_filter(sk, p->skb2)) {
1476                 kfree_skb(p->skb2);
1477                 p->skb2 = NULL;
1478                 goto out;
1479         }
1480         NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1481         if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
1482                 NETLINK_CB(p->skb2).nsid_is_set = true;
1483         val = netlink_broadcast_deliver(sk, p->skb2);
1484         if (val < 0) {
1485                 netlink_overrun(sk);
1486                 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1487                         p->delivery_failure = 1;
1488         } else {
1489                 p->congested |= val;
1490                 p->delivered = 1;
1491                 p->skb2 = NULL;
1492         }
1493 out:
1494         sock_put(sk);
1495 }
1496 
1497 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1498         u32 group, gfp_t allocation,
1499         int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1500         void *filter_data)
1501 {
1502         struct net *net = sock_net(ssk);
1503         struct netlink_broadcast_data info;
1504         struct sock *sk;
1505 
1506         skb = netlink_trim(skb, allocation);
1507 
1508         info.exclude_sk = ssk;
1509         info.net = net;
1510         info.portid = portid;
1511         info.group = group;
1512         info.failure = 0;
1513         info.delivery_failure = 0;
1514         info.congested = 0;
1515         info.delivered = 0;
1516         info.allocation = allocation;
1517         info.skb = skb;
1518         info.skb2 = NULL;
1519         info.tx_filter = filter;
1520         info.tx_data = filter_data;
1521 
1522         /* While we sleep in clone, do not allow the socket list to change */
1523 
1524         netlink_lock_table();
1525 
1526         sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1527                 do_one_broadcast(sk, &info);
1528 
1529         consume_skb(skb);
1530 
1531         netlink_unlock_table();
1532 
1533         if (info.delivery_failure) {
1534                 kfree_skb(info.skb2);
1535                 return -ENOBUFS;
1536         }
1537         consume_skb(info.skb2);
1538 
1539         if (info.delivered) {
1540                 if (info.congested && gfpflags_allow_blocking(allocation))
1541                         yield();
1542                 return 0;
1543         }
1544         return -ESRCH;
1545 }
1546 EXPORT_SYMBOL(netlink_broadcast_filtered);
1547 
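     /* Convenience wrapper around netlink_broadcast_filtered() with no
      * per-socket filter.  An illustrative caller (names are placeholders):
      *
      *     if (netlink_has_listeners(sk, MY_GROUP))
      *             err = netlink_broadcast(sk, skb, 0, MY_GROUP, GFP_KERNEL);
      *
      * where @sk is the protocol's kernel socket and MY_GROUP the multicast
      * group the message is addressed to.
      */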
1548 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1549                       u32 group, gfp_t allocation)
1550 {
1551         return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1552                 NULL, NULL);
1553 }
1554 EXPORT_SYMBOL(netlink_broadcast);
1555 
1556 struct netlink_set_err_data {
1557         struct sock *exclude_sk;
1558         u32 portid;
1559         u32 group;
1560         int code;
1561 };
1562 
1563 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1564 {
1565         struct netlink_sock *nlk = nlk_sk(sk);
1566         int ret = 0;
1567 
1568         if (sk == p->exclude_sk)
1569                 goto out;
1570 
1571         if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1572                 goto out;
1573 
1574         if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1575             !test_bit(p->group - 1, nlk->groups))
1576                 goto out;
1577 
1578         if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
1579                 ret = 1;
1580                 goto out;
1581         }
1582 
1583         sk->sk_err = p->code;
1584         sk_error_report(sk);
1585 out:
1586         return ret;
1587 }
1588 
1589 /**
1590  * netlink_set_err - report error to broadcast listeners
1591  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1592  * @portid: the PORTID of a process that we want to skip (if any)
1593  * @group: the broadcast group that will notice the error
1594  * @code: error code, must be negative (as usual in kernelspace)
1595  *
1596  * This function returns the number of broadcast listeners that have set the
1597  * NETLINK_NO_ENOBUFS socket option.
1598  */
1599 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1600 {
1601         struct netlink_set_err_data info;
1602         struct sock *sk;
1603         int ret = 0;
1604 
1605         info.exclude_sk = ssk;
1606         info.portid = portid;
1607         info.group = group;
1608         /* sk->sk_err wants a positive error value */
1609         info.code = -code;
1610 
1611         read_lock(&nl_table_lock);
1612 
1613         sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1614                 ret += do_one_set_err(sk, &info);
1615 
1616         read_unlock(&nl_table_lock);
1617         return ret;
1618 }
1619 EXPORT_SYMBOL(netlink_set_err);
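
/*
 * Illustrative sketch: reporting a buffer overrun to every member of
 * multicast group 1 while skipping the portid that triggered it.  Note the
 * negative error code, as required by the comment above; the return value
 * counts listeners that opted out via NETLINK_NO_ENOBUFS.  The names and
 * the group number are assumptions made for this example.
 */
#if 0	/* example only */
static void example_report_overrun(struct sock *my_nlsk, u32 triggering_portid)
{
        int opted_out = netlink_set_err(my_nlsk, triggering_portid, 1, -ENOBUFS);

        pr_debug("%d listeners suppress ENOBUFS\n", opted_out);
}
#endif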
1620 
1621 /* must be called with netlink table grabbed */
1622 static void netlink_update_socket_mc(struct netlink_sock *nlk,
1623                                      unsigned int group,
1624                                      int is_new)
1625 {
1626         int old, new = !!is_new, subscriptions;
1627 
1628         old = test_bit(group - 1, nlk->groups);
1629         subscriptions = nlk->subscriptions - old + new;
1630         if (new)
1631                 __set_bit(group - 1, nlk->groups);
1632         else
1633                 __clear_bit(group - 1, nlk->groups);
1634         netlink_update_subscriptions(&nlk->sk, subscriptions);
1635         netlink_update_listeners(&nlk->sk);
1636 }
1637 
1638 static int netlink_setsockopt(struct socket *sock, int level, int optname,
1639                               sockptr_t optval, unsigned int optlen)
1640 {
1641         struct sock *sk = sock->sk;
1642         struct netlink_sock *nlk = nlk_sk(sk);
1643         unsigned int val = 0;
1644         int err;
1645 
1646         if (level != SOL_NETLINK)
1647                 return -ENOPROTOOPT;
1648 
1649         if (optlen >= sizeof(int) &&
1650             copy_from_sockptr(&val, optval, sizeof(val)))
1651                 return -EFAULT;
1652 
1653         switch (optname) {
1654         case NETLINK_PKTINFO:
1655                 if (val)
1656                         nlk->flags |= NETLINK_F_RECV_PKTINFO;
1657                 else
1658                         nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
1659                 err = 0;
1660                 break;
1661         case NETLINK_ADD_MEMBERSHIP:
1662         case NETLINK_DROP_MEMBERSHIP: {
1663                 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1664                         return -EPERM;
1665                 err = netlink_realloc_groups(sk);
1666                 if (err)
1667                         return err;
1668                 if (!val || val - 1 >= nlk->ngroups)
1669                         return -EINVAL;
1670                 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
1671                         err = nlk->netlink_bind(sock_net(sk), val);
1672                         if (err)
1673                                 return err;
1674                 }
1675                 netlink_table_grab();
1676                 netlink_update_socket_mc(nlk, val,
1677                                          optname == NETLINK_ADD_MEMBERSHIP);
1678                 netlink_table_ungrab();
1679                 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
1680                         nlk->netlink_unbind(sock_net(sk), val);
1681 
1682                 err = 0;
1683                 break;
1684         }
1685         case NETLINK_BROADCAST_ERROR:
1686                 if (val)
1687                         nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
1688                 else
1689                         nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
1690                 err = 0;
1691                 break;
1692         case NETLINK_NO_ENOBUFS:
1693                 if (val) {
1694                         nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
1695                         clear_bit(NETLINK_S_CONGESTED, &nlk->state);
1696                         wake_up_interruptible(&nlk->wait);
1697                 } else {
1698                         nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
1699                 }
1700                 err = 0;
1701                 break;
1702         case NETLINK_LISTEN_ALL_NSID:
1703                 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
1704                         return -EPERM;
1705 
1706                 if (val)
1707                         nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
1708                 else
1709                         nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
1710                 err = 0;
1711                 break;
1712         case NETLINK_CAP_ACK:
1713                 if (val)
1714                         nlk->flags |= NETLINK_F_CAP_ACK;
1715                 else
1716                         nlk->flags &= ~NETLINK_F_CAP_ACK;
1717                 err = 0;
1718                 break;
1719         case NETLINK_EXT_ACK:
1720                 if (val)
1721                         nlk->flags |= NETLINK_F_EXT_ACK;
1722                 else
1723                         nlk->flags &= ~NETLINK_F_EXT_ACK;
1724                 err = 0;
1725                 break;
1726         case NETLINK_GET_STRICT_CHK:
1727                 if (val)
1728                         nlk->flags |= NETLINK_F_STRICT_CHK;
1729                 else
1730                         nlk->flags &= ~NETLINK_F_STRICT_CHK;
1731                 err = 0;
1732                 break;
1733         default:
1734                 err = -ENOPROTOOPT;
1735         }
1736         return err;
1737 }
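
/*
 * Illustrative userspace sketch: the membership options handled above map
 * directly onto setsockopt(SOL_NETLINK, ...) calls.  Joining RTNLGRP_LINK on
 * an rtnetlink socket is only an assumed example; any valid group of the
 * bound protocol works the same way, and optlen must be >= sizeof(int).
 */
#if 0	/* example only (userspace) */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_join_link_group(int nl_fd)
{
        int grp = RTNLGRP_LINK;

        return setsockopt(nl_fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                          &grp, sizeof(grp));
}
#endif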
1738 
1739 static int netlink_getsockopt(struct socket *sock, int level, int optname,
1740                               char __user *optval, int __user *optlen)
1741 {
1742         struct sock *sk = sock->sk;
1743         struct netlink_sock *nlk = nlk_sk(sk);
1744         int len, val, err;
1745 
1746         if (level != SOL_NETLINK)
1747                 return -ENOPROTOOPT;
1748 
1749         if (get_user(len, optlen))
1750                 return -EFAULT;
1751         if (len < 0)
1752                 return -EINVAL;
1753 
1754         switch (optname) {
1755         case NETLINK_PKTINFO:
1756                 if (len < sizeof(int))
1757                         return -EINVAL;
1758                 len = sizeof(int);
1759                 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
1760                 if (put_user(len, optlen) ||
1761                     put_user(val, optval))
1762                         return -EFAULT;
1763                 err = 0;
1764                 break;
1765         case NETLINK_BROADCAST_ERROR:
1766                 if (len < sizeof(int))
1767                         return -EINVAL;
1768                 len = sizeof(int);
1769                 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
1770                 if (put_user(len, optlen) ||
1771                     put_user(val, optval))
1772                         return -EFAULT;
1773                 err = 0;
1774                 break;
1775         case NETLINK_NO_ENOBUFS:
1776                 if (len < sizeof(int))
1777                         return -EINVAL;
1778                 len = sizeof(int);
1779                 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
1780                 if (put_user(len, optlen) ||
1781                     put_user(val, optval))
1782                         return -EFAULT;
1783                 err = 0;
1784                 break;
1785         case NETLINK_LIST_MEMBERSHIPS: {
1786                 int pos, idx, shift;
1787 
1788                 err = 0;
1789                 netlink_lock_table();
1790                 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
1791                         if (len - pos < sizeof(u32))
1792                                 break;
1793 
1794                         idx = pos / sizeof(unsigned long);
1795                         shift = (pos % sizeof(unsigned long)) * 8;
1796                         if (put_user((u32)(nlk->groups[idx] >> shift),
1797                                      (u32 __user *)(optval + pos))) {
1798                                 err = -EFAULT;
1799                                 break;
1800                         }
1801                 }
1802                 if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
1803                         err = -EFAULT;
1804                 netlink_unlock_table();
1805                 break;
1806         }
1807         case NETLINK_CAP_ACK:
1808                 if (len < sizeof(int))
1809                         return -EINVAL;
1810                 len = sizeof(int);
1811                 val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
1812                 if (put_user(len, optlen) ||
1813                     put_user(val, optval))
1814                         return -EFAULT;
1815                 err = 0;
1816                 break;
1817         case NETLINK_EXT_ACK:
1818                 if (len < sizeof(int))
1819                         return -EINVAL;
1820                 len = sizeof(int);
1821                 val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
1822                 if (put_user(len, optlen) || put_user(val, optval))
1823                         return -EFAULT;
1824                 err = 0;
1825                 break;
1826         case NETLINK_GET_STRICT_CHK:
1827                 if (len < sizeof(int))
1828                         return -EINVAL;
1829                 len = sizeof(int);
1830                 val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
1831                 if (put_user(len, optlen) || put_user(val, optval))
1832                         return -EFAULT;
1833                 err = 0;
1834                 break;
1835         default:
1836                 err = -ENOPROTOOPT;
1837         }
1838         return err;
1839 }
1840 
1841 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1842 {
1843         struct nl_pktinfo info;
1844 
1845         info.group = NETLINK_CB(skb).dst_group;
1846         put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1847 }
1848 
1849 static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
1850                                          struct sk_buff *skb)
1851 {
1852         if (!NETLINK_CB(skb).nsid_is_set)
1853                 return;
1854 
1855         put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
1856                  &NETLINK_CB(skb).nsid);
1857 }
1858 
1859 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1860 {
1861         struct sock *sk = sock->sk;
1862         struct netlink_sock *nlk = nlk_sk(sk);
1863         DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1864         u32 dst_portid;
1865         u32 dst_group;
1866         struct sk_buff *skb;
1867         int err;
1868         struct scm_cookie scm;
1869         u32 netlink_skb_flags = 0;
1870 
1871         if (msg->msg_flags & MSG_OOB)
1872                 return -EOPNOTSUPP;
1873 
1874         err = scm_send(sock, msg, &scm, true);
1875         if (err < 0)
1876                 return err;
1877 
1878         if (msg->msg_namelen) {
1879                 err = -EINVAL;
1880                 if (msg->msg_namelen < sizeof(struct sockaddr_nl))
1881                         goto out;
1882                 if (addr->nl_family != AF_NETLINK)
1883                         goto out;
1884                 dst_portid = addr->nl_pid;
1885                 dst_group = ffs(addr->nl_groups);
1886                 err =  -EPERM;
1887                 if ((dst_group || dst_portid) &&
1888                     !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1889                         goto out;
1890                 netlink_skb_flags |= NETLINK_SKB_DST;
1891         } else {
1892                 dst_portid = nlk->dst_portid;
1893                 dst_group = nlk->dst_group;
1894         }
1895 
1896         /* Paired with WRITE_ONCE() in netlink_insert() */
1897         if (!READ_ONCE(nlk->bound)) {
1898                 err = netlink_autobind(sock);
1899                 if (err)
1900                         goto out;
1901         } else {
1902                 /* Ensure nlk is hashed and visible. */
1903                 smp_rmb();
1904         }
1905 
1906         err = -EMSGSIZE;
1907         if (len > sk->sk_sndbuf - 32)
1908                 goto out;
1909         err = -ENOBUFS;
1910         skb = netlink_alloc_large_skb(len, dst_group);
1911         if (skb == NULL)
1912                 goto out;
1913 
1914         NETLINK_CB(skb).portid  = nlk->portid;
1915         NETLINK_CB(skb).dst_group = dst_group;
1916         NETLINK_CB(skb).creds   = scm.creds;
1917         NETLINK_CB(skb).flags   = netlink_skb_flags;
1918 
1919         err = -EFAULT;
1920         if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1921                 kfree_skb(skb);
1922                 goto out;
1923         }
1924 
1925         err = security_netlink_send(sk, skb);
1926         if (err) {
1927                 kfree_skb(skb);
1928                 goto out;
1929         }
1930 
1931         if (dst_group) {
1932                 refcount_inc(&skb->users);
1933                 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1934         }
1935         err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);
1936 
1937 out:
1938         scm_destroy(&scm);
1939         return err;
1940 }
1941 
1942 static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1943                            int flags)
1944 {
1945         struct scm_cookie scm;
1946         struct sock *sk = sock->sk;
1947         struct netlink_sock *nlk = nlk_sk(sk);
1948         int noblock = flags & MSG_DONTWAIT;
1949         size_t copied;
1950         struct sk_buff *skb, *data_skb;
1951         int err, ret;
1952 
1953         if (flags & MSG_OOB)
1954                 return -EOPNOTSUPP;
1955 
1956         copied = 0;
1957 
1958         skb = skb_recv_datagram(sk, flags, noblock, &err);
1959         if (skb == NULL)
1960                 goto out;
1961 
1962         data_skb = skb;
1963 
1964 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1965         if (unlikely(skb_shinfo(skb)->frag_list)) {
1966                 /*
1967          * If this skb has a frag_list, it means that we have to use
1968          * the frag_list skb's data for compat tasks and the regular
1969          * skb's data for normal (non-compat) tasks.
1970                  *
1971                  * If we need to send the compat skb, assign it to the
1972                  * 'data_skb' variable so that it will be used below for data
1973                  * copying. We keep 'skb' for everything else, including
1974                  * freeing both later.
1975                  */
1976                 if (flags & MSG_CMSG_COMPAT)
1977                         data_skb = skb_shinfo(skb)->frag_list;
1978         }
1979 #endif
1980 
1981         /* Record the max length of recvmsg() calls for future allocations */
1982         nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
1983         nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
1984                                      SKB_WITH_OVERHEAD(32768));
1985 
1986         copied = data_skb->len;
1987         if (len < copied) {
1988                 msg->msg_flags |= MSG_TRUNC;
1989                 copied = len;
1990         }
1991 
1992         skb_reset_transport_header(data_skb);
1993         err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1994 
1995         if (msg->msg_name) {
1996                 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1997                 addr->nl_family = AF_NETLINK;
1998                 addr->nl_pad    = 0;
1999                 addr->nl_pid    = NETLINK_CB(skb).portid;
2000                 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
2001                 msg->msg_namelen = sizeof(*addr);
2002         }
2003 
2004         if (nlk->flags & NETLINK_F_RECV_PKTINFO)
2005                 netlink_cmsg_recv_pktinfo(msg, skb);
2006         if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
2007                 netlink_cmsg_listen_all_nsid(sk, msg, skb);
2008 
2009         memset(&scm, 0, sizeof(scm));
2010         scm.creds = *NETLINK_CREDS(skb);
2011         if (flags & MSG_TRUNC)
2012                 copied = data_skb->len;
2013 
2014         skb_free_datagram(sk, skb);
2015 
2016         if (nlk->cb_running &&
2017             atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2018                 ret = netlink_dump(sk);
2019                 if (ret) {
2020                         sk->sk_err = -ret;
2021                         sk_error_report(sk);
2022                 }
2023         }
2024 
2025         scm_recv(sock, msg, &scm, flags);
2026 out:
2027         netlink_rcv_wake(sk);
2028         return err ? : copied;
2029 }
2030 
2031 static void netlink_data_ready(struct sock *sk)
2032 {
2033         BUG();
2034 }
2035 
2036 /*
2037  *      We export these functions to other modules. They provide a
2038  *      complete set of kernel non-blocking support for message
2039  *      queueing.
2040  */
2041 
2042 struct sock *
2043 __netlink_kernel_create(struct net *net, int unit, struct module *module,
2044                         struct netlink_kernel_cfg *cfg)
2045 {
2046         struct socket *sock;
2047         struct sock *sk;
2048         struct netlink_sock *nlk;
2049         struct listeners *listeners = NULL;
2050         struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2051         unsigned int groups;
2052 
2053         BUG_ON(!nl_table);
2054 
2055         if (unit < 0 || unit >= MAX_LINKS)
2056                 return NULL;
2057 
2058         if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2059                 return NULL;
2060 
2061         if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
2062                 goto out_sock_release_nosk;
2063 
2064         sk = sock->sk;
2065 
2066         if (!cfg || cfg->groups < 32)
2067                 groups = 32;
2068         else
2069                 groups = cfg->groups;
2070 
2071         listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2072         if (!listeners)
2073                 goto out_sock_release;
2074 
2075         sk->sk_data_ready = netlink_data_ready;
2076         if (cfg && cfg->input)
2077                 nlk_sk(sk)->netlink_rcv = cfg->input;
2078 
2079         if (netlink_insert(sk, 0))
2080                 goto out_sock_release;
2081 
2082         nlk = nlk_sk(sk);
2083         nlk->flags |= NETLINK_F_KERNEL_SOCKET;
2084 
2085         netlink_table_grab();
2086         if (!nl_table[unit].registered) {
2087                 nl_table[unit].groups = groups;
2088                 rcu_assign_pointer(nl_table[unit].listeners, listeners);
2089                 nl_table[unit].cb_mutex = cb_mutex;
2090                 nl_table[unit].module = module;
2091                 if (cfg) {
2092                         nl_table[unit].bind = cfg->bind;
2093                         nl_table[unit].unbind = cfg->unbind;
2094                         nl_table[unit].flags = cfg->flags;
2095                         if (cfg->compare)
2096                                 nl_table[unit].compare = cfg->compare;
2097                 }
2098                 nl_table[unit].registered = 1;
2099         } else {
2100                 kfree(listeners);
2101                 nl_table[unit].registered++;
2102         }
2103         netlink_table_ungrab();
2104         return sk;
2105 
2106 out_sock_release:
2107         kfree(listeners);
2108         netlink_kernel_release(sk);
2109         return NULL;
2110 
2111 out_sock_release_nosk:
2112         sock_release(sock);
2113         return NULL;
2114 }
2115 EXPORT_SYMBOL(__netlink_kernel_create);
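
/*
 * Illustrative sketch: most in-kernel users go through the
 * netlink_kernel_create() wrapper with a struct netlink_kernel_cfg.  The
 * protocol choice, the input callback body and the helper names below are
 * assumptions made for this example.
 */
#if 0	/* example only */
static void example_input(struct sk_buff *skb)
{
        /* runs synchronously in the sender's context for each unicast skb */
        pr_info("example: received %u bytes\n", skb->len);
}

static struct sock *example_create(struct net *net)
{
        struct netlink_kernel_cfg cfg = {
                .groups = 32,
                .input  = example_input,
        };

        return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
}
#endif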
2116 
2117 void
2118 netlink_kernel_release(struct sock *sk)
2119 {
2120         if (sk == NULL || sk->sk_socket == NULL)
2121                 return;
2122 
2123         sock_release(sk->sk_socket);
2124 }
2125 EXPORT_SYMBOL(netlink_kernel_release);
2126 
2127 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2128 {
2129         struct listeners *new, *old;
2130         struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2131 
2132         if (groups < 32)
2133                 groups = 32;
2134 
2135         if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2136                 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2137                 if (!new)
2138                         return -ENOMEM;
2139                 old = nl_deref_protected(tbl->listeners);
2140                 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2141                 rcu_assign_pointer(tbl->listeners, new);
2142 
2143                 kfree_rcu(old, rcu);
2144         }
2145         tbl->groups = groups;
2146 
2147         return 0;
2148 }
2149 
2150 /**
2151  * netlink_change_ngroups - change number of multicast groups
2152  *
2153  * This changes the number of multicast groups that are available
2154  * on a certain netlink family. Note that it is not possible to
2155  * change the number of groups to below 32. Also note that it does
2156  * not implicitly call netlink_clear_multicast_users() when the
2157  * number of groups is reduced.
2158  *
2159  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2160  * @groups: The new number of groups.
2161  */
2162 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2163 {
2164         int err;
2165 
2166         netlink_table_grab();
2167         err = __netlink_change_ngroups(sk, groups);
2168         netlink_table_ungrab();
2169 
2170         return err;
2171 }
2172 
2173 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2174 {
2175         struct sock *sk;
2176         struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2177 
2178         sk_for_each_bound(sk, &tbl->mc_list)
2179                 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2180 }
2181 
2182 struct nlmsghdr *
2183 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2184 {
2185         struct nlmsghdr *nlh;
2186         int size = nlmsg_msg_size(len);
2187 
2188         nlh = skb_put(skb, NLMSG_ALIGN(size));
2189         nlh->nlmsg_type = type;
2190         nlh->nlmsg_len = size;
2191         nlh->nlmsg_flags = flags;
2192         nlh->nlmsg_pid = portid;
2193         nlh->nlmsg_seq = seq;
2194         if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2195                 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2196         return nlh;
2197 }
2198 EXPORT_SYMBOL(__nlmsg_put);
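
/*
 * Illustrative sketch: callers normally use the nlmsg_put() wrapper from
 * <net/netlink.h>, which returns NULL instead of overrunning the skb when
 * there is not enough tailroom.  The message type and the u32 payload are
 * assumptions made for this example.
 */
#if 0	/* example only */
static int example_fill(struct sk_buff *skb, u32 portid, u32 seq)
{
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, portid, seq, NLMSG_MIN_TYPE, sizeof(u32), 0);
        if (!nlh)
                return -EMSGSIZE;

        *(u32 *)nlmsg_data(nlh) = 42;   /* hypothetical payload */
        nlmsg_end(skb, nlh);
        return 0;
}
#endif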
2199 
2200 /*
2201  * It looks a bit ugly.
2202  * It would be better to create a kernel thread.
2203  */
2204 
2205 static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
2206                              struct netlink_callback *cb,
2207                              struct netlink_ext_ack *extack)
2208 {
2209         struct nlmsghdr *nlh;
2210 
2211         nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
2212                                NLM_F_MULTI | cb->answer_flags);
2213         if (WARN_ON(!nlh))
2214                 return -ENOBUFS;
2215 
2216         nl_dump_check_consistent(cb, nlh);
2217         memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
2218 
2219         if (extack->_msg && nlk->flags & NETLINK_F_EXT_ACK) {
2220                 nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
2221                 if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
2222                         nlmsg_end(skb, nlh);
2223         }
2224 
2225         return 0;
2226 }
2227 
2228 static int netlink_dump(struct sock *sk)
2229 {
2230         struct netlink_sock *nlk = nlk_sk(sk);
2231         struct netlink_ext_ack extack = {};
2232         struct netlink_callback *cb;
2233         struct sk_buff *skb = NULL;
2234         struct module *module;
2235         int err = -ENOBUFS;
2236         int alloc_min_size;
2237         int alloc_size;
2238 
2239         mutex_lock(nlk->cb_mutex);
2240         if (!nlk->cb_running) {
2241                 err = -EINVAL;
2242                 goto errout_skb;
2243         }
2244 
2245         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2246                 goto errout_skb;
2247 
2248         /* NLMSG_GOODSIZE is small to avoid high order allocations being
2249          * required, but it makes sense to _attempt_ a 16KiB allocation
2250          * to reduce the number of system calls on dump operations, if the
2251          * user has ever provided a big enough buffer.
2252          */
2253         cb = &nlk->cb;
2254         alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2255 
2256         if (alloc_min_size < nlk->max_recvmsg_len) {
2257                 alloc_size = nlk->max_recvmsg_len;
2258                 skb = alloc_skb(alloc_size,
2259                                 (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
2260                                 __GFP_NOWARN | __GFP_NORETRY);
2261         }
2262         if (!skb) {
2263                 alloc_size = alloc_min_size;
2264                 skb = alloc_skb(alloc_size, GFP_KERNEL);
2265         }
2266         if (!skb)
2267                 goto errout_skb;
2268 
2269         /* Trim skb to the allocated size. The user is expected to provide a
2270          * buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len is
2271          * capped in netlink_recvmsg())). The dump will pack as many smaller
2272          * messages as can fit within the allocated skb. The skb is typically
2273          * allocated with more space than required (possibly close to 2x the
2274          * requested size, due to rounding up to the next power of 2).
2275          * Allowing the dump to use the excess space makes it difficult for a
2276          * user to size a reasonable static buffer based on the expected
2277          * largest dump of a single netdev. The outcome is a MSG_TRUNC error.
2278          */
2279         skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2280         netlink_skb_set_owner_r(skb, sk);
2281 
2282         if (nlk->dump_done_errno > 0) {
2283                 cb->extack = &extack;
2284                 nlk->dump_done_errno = cb->dump(skb, cb);
2285                 cb->extack = NULL;
2286         }
2287 
2288         if (nlk->dump_done_errno > 0 ||
2289             skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
2290                 mutex_unlock(nlk->cb_mutex);
2291 
2292                 if (sk_filter(sk, skb))
2293                         kfree_skb(skb);
2294                 else
2295                         __netlink_sendskb(sk, skb);
2296                 return 0;
2297         }
2298 
2299         if (netlink_dump_done(nlk, skb, cb, &extack))
2300                 goto errout_skb;
2301 
2302 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2303         /* frag_list skb's data is used for compat tasks
2304          * and the regular skb's data for normal (non-compat) tasks.
2305          * See netlink_recvmsg().
2306          */
2307         if (unlikely(skb_shinfo(skb)->frag_list)) {
2308                 if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack))
2309                         goto errout_skb;
2310         }
2311 #endif
2312 
2313         if (sk_filter(sk, skb))
2314                 kfree_skb(skb);
2315         else
2316                 __netlink_sendskb(sk, skb);
2317 
2318         if (cb->done)
2319                 cb->done(cb);
2320 
2321         nlk->cb_running = false;
2322         module = cb->module;
2323         skb = cb->skb;
2324         mutex_unlock(nlk->cb_mutex);
2325         module_put(module);
2326         consume_skb(skb);
2327         return 0;
2328 
2329 errout_skb:
2330         mutex_unlock(nlk->cb_mutex);
2331         kfree_skb(skb);
2332         return err;
2333 }
2334 
2335 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2336                          const struct nlmsghdr *nlh,
2337                          struct netlink_dump_control *control)
2338 {
2339         struct netlink_sock *nlk, *nlk2;
2340         struct netlink_callback *cb;
2341         struct sock *sk;
2342         int ret;
2343 
2344         refcount_inc(&skb->users);
2345 
2346         sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2347         if (sk == NULL) {
2348                 ret = -ECONNREFUSED;
2349                 goto error_free;
2350         }
2351 
2352         nlk = nlk_sk(sk);
2353         mutex_lock(nlk->cb_mutex);
2354         /* A dump is in progress... */
2355         if (nlk->cb_running) {
2356                 ret = -EBUSY;
2357                 goto error_unlock;
2358         }
2359         /* take a reference on the module that cb->dump belongs to */
2360         if (!try_module_get(control->module)) {
2361                 ret = -EPROTONOSUPPORT;
2362                 goto error_unlock;
2363         }
2364 
2365         cb = &nlk->cb;
2366         memset(cb, 0, sizeof(*cb));
2367         cb->dump = control->dump;
2368         cb->done = control->done;
2369         cb->nlh = nlh;
2370         cb->data = control->data;
2371         cb->module = control->module;
2372         cb->min_dump_alloc = control->min_dump_alloc;
2373         cb->skb = skb;
2374 
2375         nlk2 = nlk_sk(NETLINK_CB(skb).sk);
2376         cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);
2377 
2378         if (control->start) {
2379                 ret = control->start(cb);
2380                 if (ret)
2381                         goto error_put;
2382         }
2383 
2384         nlk->cb_running = true;
2385         nlk->dump_done_errno = INT_MAX;
2386 
2387         mutex_unlock(nlk->cb_mutex);
2388 
2389         ret = netlink_dump(sk);
2390 
2391         sock_put(sk);
2392 
2393         if (ret)
2394                 return ret;
2395 
2396         /* We successfully started a dump; by returning -EINTR we
2397          * signal that no ACK should be sent even if it was requested.
2398          */
2399         return -EINTR;
2400 
2401 error_put:
2402         module_put(control->module);
2403 error_unlock:
2404         sock_put(sk);
2405         mutex_unlock(nlk->cb_mutex);
2406 error_free:
2407         kfree_skb(skb);
2408         return ret;
2409 }
2410 EXPORT_SYMBOL(__netlink_dump_start);
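
/*
 * Illustrative sketch: a message handler that kicks off a dump via the
 * netlink_dump_start() wrapper.  Returning its result propagates the -EINTR
 * convention described above, so no ACK is sent for a successfully started
 * dump.  The kernel socket argument, the dump callback body and all names
 * are assumptions made for this example.
 */
#if 0	/* example only */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        /* fill skb; return skb->len to be called again, 0 when done */
        return 0;
}

static int example_start_dump(struct sock *kernel_sk, struct sk_buff *skb,
                              struct nlmsghdr *nlh)
{
        struct netlink_dump_control c = {
                .dump   = example_dump,
                .module = THIS_MODULE,
        };

        return netlink_dump_start(kernel_sk, skb, nlh, &c);
}
#endif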
2411 
2412 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2413                  const struct netlink_ext_ack *extack)
2414 {
2415         struct sk_buff *skb;
2416         struct nlmsghdr *rep;
2417         struct nlmsgerr *errmsg;
2418         size_t payload = sizeof(*errmsg);
2419         size_t tlvlen = 0;
2420         struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2421         unsigned int flags = 0;
2422         bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
2423 
2424         /* Error messages get the original request appended, unless the user
2425          * requests to cap the error message, and get extra error data if
2426          * requested.
2427          */
2428         if (nlk_has_extack && extack && extack->_msg)
2429                 tlvlen += nla_total_size(strlen(extack->_msg) + 1);
2430 
2431         if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
2432                 payload += nlmsg_len(nlh);
2433         else
2434                 flags |= NLM_F_CAPPED;
2435         if (err && nlk_has_extack && extack && extack->bad_attr)
2436                 tlvlen += nla_total_size(sizeof(u32));
2437         if (nlk_has_extack && extack && extack->cookie_len)
2438                 tlvlen += nla_total_size(extack->cookie_len);
2439         if (err && nlk_has_extack && extack && extack->policy)
2440                 tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy);
2441 
2442         if (tlvlen)
2443                 flags |= NLM_F_ACK_TLVS;
2444 
2445         skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
2446         if (!skb) {
2447                 NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
2448                 sk_error_report(NETLINK_CB(in_skb).sk);
2449                 return;
2450         }
2451 
2452         rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2453                           NLMSG_ERROR, payload, flags);
2454         errmsg = nlmsg_data(rep);
2455         errmsg->error = err;
2456         memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
2457 
2458         if (nlk_has_extack && extack) {
2459                 if (extack->_msg) {
2460                         WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
2461                                                extack->_msg));
2462                 }
2463                 if (err && extack->bad_attr &&
2464                     !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
2465                              (u8 *)extack->bad_attr >= in_skb->data +
2466                                                        in_skb->len))
2467                         WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
2468                                             (u8 *)extack->bad_attr -
2469                                             (u8 *)nlh));
2470                 if (extack->cookie_len)
2471                         WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
2472                                         extack->cookie_len, extack->cookie));
2473                 if (extack->policy)
2474                         netlink_policy_dump_write_attr(skb, extack->policy,
2475                                                        NLMSGERR_ATTR_POLICY);
2476         }
2477 
2478         nlmsg_end(skb, rep);
2479 
2480         nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
2481 }
2482 EXPORT_SYMBOL(netlink_ack);
2483 
2484 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2485                                                    struct nlmsghdr *,
2486                                                    struct netlink_ext_ack *))
2487 {
2488         struct netlink_ext_ack extack;
2489         struct nlmsghdr *nlh;
2490         int err;
2491 
2492         while (skb->len >= nlmsg_total_size(0)) {
2493                 int msglen;
2494 
2495                 memset(&extack, 0, sizeof(extack));
2496                 nlh = nlmsg_hdr(skb);
2497                 err = 0;
2498 
2499                 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2500                         return 0;
2501 
2502                 /* Only requests are handled by the kernel */
2503                 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2504                         goto ack;
2505 
2506                 /* Skip control messages */
2507                 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2508                         goto ack;
2509 
2510                 err = cb(skb, nlh, &extack);
2511                 if (err == -EINTR)
2512                         goto skip;
2513 
2514 ack:
2515                 if (nlh->nlmsg_flags & NLM_F_ACK || err)
2516                         netlink_ack(skb, nlh, err, &extack);
2517 
2518 skip:
2519                 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2520                 if (msglen > skb->len)
2521                         msglen = skb->len;
2522                 skb_pull(skb, msglen);
2523         }
2524 
2525         return 0;
2526 }
2527 EXPORT_SYMBOL(netlink_rcv_skb);
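
/*
 * Illustrative sketch: the shape of a per-message handler that a kernel
 * socket's input callback would pass to netlink_rcv_skb().  The capability
 * check and the blanket rejection are placeholders; a real handler would
 * dispatch on nlh->nlmsg_type.  All names are assumptions for this example.
 */
#if 0	/* example only */
static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct netlink_ext_ack *extack)
{
        if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        NL_SET_ERR_MSG(extack, "unsupported message type");
        return -EOPNOTSUPP;
}

static void example_netlink_rcv(struct sk_buff *skb)
{
        netlink_rcv_skb(skb, example_rcv_msg);
}
#endif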
2528 
2529 /**
2530  * nlmsg_notify - send a notification netlink message
2531  * @sk: netlink socket to use
2532  * @skb: notification message
2533  * @portid: destination netlink portid for reports or 0
2534  * @group: destination multicast group or 0
2535  * @report: 1 to report back, 0 to disable
2536  * @flags: allocation flags
2537  */
2538 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2539                  unsigned int group, int report, gfp_t flags)
2540 {
2541         int err = 0;
2542 
2543         if (group) {
2544                 int exclude_portid = 0;
2545 
2546                 if (report) {
2547                         refcount_inc(&skb->users);
2548                         exclude_portid = portid;
2549                 }
2550 
2551                 /* errors are reported via the destination sk->sk_err; delivery
2552                  * errors are also propagated if NETLINK_BROADCAST_ERROR is set */
2553                 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2554                 if (err == -ESRCH)
2555                         err = 0;
2556         }
2557 
2558         if (report) {
2559                 int err2;
2560 
2561                 err2 = nlmsg_unicast(sk, skb, portid);
2562                 if (!err)
2563                         err = err2;
2564         }
2565 
2566         return err;
2567 }
2568 EXPORT_SYMBOL(nlmsg_notify);
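
/*
 * Illustrative sketch: emitting an event with nlmsg_notify().  With report
 * set, the message is multicast to the group (excluding the requester) and
 * also unicast back to the requesting portid.  The socket, the group number
 * and the helper name are assumptions made for this example.
 */
#if 0	/* example only */
static int example_send_event(struct sock *my_nlsk, struct sk_buff *skb,
                              u32 requester_portid, int report)
{
        return nlmsg_notify(my_nlsk, skb, requester_portid, 1, report,
                            GFP_KERNEL);
}
#endif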
2569 
2570 #ifdef CONFIG_PROC_FS
2571 struct nl_seq_iter {
2572         struct seq_net_private p;
2573         struct rhashtable_iter hti;
2574         int link;
2575 };
2576 
2577 static void netlink_walk_start(struct nl_seq_iter *iter)
2578 {
2579         rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
2580         rhashtable_walk_start(&iter->hti);
2581 }
2582 
2583 static void netlink_walk_stop(struct nl_seq_iter *iter)
2584 {
2585         rhashtable_walk_stop(&iter->hti);
2586         rhashtable_walk_exit(&iter->hti);
2587 }
2588 
2589 static void *__netlink_seq_next(struct seq_file *seq)
2590 {
2591         struct nl_seq_iter *iter = seq->private;
2592         struct netlink_sock *nlk;
2593 
2594         do {
2595                 for (;;) {
2596                         nlk = rhashtable_walk_next(&iter->hti);
2597 
2598                         if (IS_ERR(nlk)) {
2599                                 if (PTR_ERR(nlk) == -EAGAIN)
2600                                         continue;
2601 
2602                                 return nlk;
2603                         }
2604 
2605                         if (nlk)
2606                                 break;
2607 
2608                         netlink_walk_stop(iter);
2609                         if (++iter->link >= MAX_LINKS)
2610                                 return NULL;
2611 
2612                         netlink_walk_start(iter);
2613                 }
2614         } while (sock_net(&nlk->sk) != seq_file_net(seq));
2615 
2616         return nlk;
2617 }
2618 
2619 static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
2620         __acquires(RCU)
2621 {
2622         struct nl_seq_iter *iter = seq->private;
2623         void *obj = SEQ_START_TOKEN;
2624         loff_t pos;
2625 
2626         iter->link = 0;
2627 
2628         netlink_walk_start(iter);
2629 
2630         for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
2631                 obj = __netlink_seq_next(seq);
2632 
2633         return obj;
2634 }
2635 
2636 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2637 {
2638         ++*pos;
2639         return __netlink_seq_next(seq);
2640 }
2641 
2642 static void netlink_native_seq_stop(struct seq_file *seq, void *v)
2643 {
2644         struct nl_seq_iter *iter = seq->private;
2645 
2646         if (iter->link >= MAX_LINKS)
2647                 return;
2648 
2649         netlink_walk_stop(iter);
2650 }
2651 
2652 
2653 static int netlink_native_seq_show(struct seq_file *seq, void *v)
2654 {
2655         if (v == SEQ_START_TOKEN) {
2656                 seq_puts(seq,
2657                          "sk               Eth Pid        Groups   "
2658                          "Rmem     Wmem     Dump  Locks    Drops    Inode\n");
2659         } else {
2660                 struct sock *s = v;
2661                 struct netlink_sock *nlk = nlk_sk(s);
2662 
2663                 seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n",
2664                            s,
2665                            s->sk_protocol,
2666                            nlk->portid,
2667                            nlk->groups ? (u32)nlk->groups[0] : 0,
2668                            sk_rmem_alloc_get(s),
2669                            sk_wmem_alloc_get(s),
2670                            nlk->cb_running,
2671                            refcount_read(&s->sk_refcnt),
2672                            atomic_read(&s->sk_drops),
2673                            sock_i_ino(s)
2674                         );
2675 
2676         }
2677         return 0;
2678 }
2679 
2680 #ifdef CONFIG_BPF_SYSCALL
2681 struct bpf_iter__netlink {
2682         __bpf_md_ptr(struct bpf_iter_meta *, meta);
2683         __bpf_md_ptr(struct netlink_sock *, sk);
2684 };
2685 
2686 DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk)
2687 
2688 static int netlink_prog_seq_show(struct bpf_prog *prog,
2689                                   struct bpf_iter_meta *meta,
2690                                   void *v)
2691 {
2692         struct bpf_iter__netlink ctx;
2693 
2694         meta->seq_num--;  /* skip SEQ_START_TOKEN */
2695         ctx.meta = meta;
2696         ctx.sk = nlk_sk((struct sock *)v);
2697         return bpf_iter_run_prog(prog, &ctx);
2698 }
2699 
2700 static int netlink_seq_show(struct seq_file *seq, void *v)
2701 {
2702         struct bpf_iter_meta meta;
2703         struct bpf_prog *prog;
2704 
2705         meta.seq = seq;
2706         prog = bpf_iter_get_info(&meta, false);
2707         if (!prog)
2708                 return netlink_native_seq_show(seq, v);
2709 
2710         if (v != SEQ_START_TOKEN)
2711                 return netlink_prog_seq_show(prog, &meta, v);
2712 
2713         return 0;
2714 }
2715 
2716 static void netlink_seq_stop(struct seq_file *seq, void *v)
2717 {
2718         struct bpf_iter_meta meta;
2719         struct bpf_prog *prog;
2720 
2721         if (!v) {
2722                 meta.seq = seq;
2723                 prog = bpf_iter_get_info(&meta, true);
2724                 if (prog)
2725                         (void)netlink_prog_seq_show(prog, &meta, v);
2726         }
2727 
2728         netlink_native_seq_stop(seq, v);
2729 }
2730 #else
2731 static int netlink_seq_show(struct seq_file *seq, void *v)
2732 {
2733         return netlink_native_seq_show(seq, v);
2734 }
2735 
2736 static void netlink_seq_stop(struct seq_file *seq, void *v)
2737 {
2738         netlink_native_seq_stop(seq, v);
2739 }
2740 #endif
2741 
2742 static const struct seq_operations netlink_seq_ops = {
2743         .start  = netlink_seq_start,
2744         .next   = netlink_seq_next,
2745         .stop   = netlink_seq_stop,
2746         .show   = netlink_seq_show,
2747 };
2748 #endif
2749 
2750 int netlink_register_notifier(struct notifier_block *nb)
2751 {
2752         return blocking_notifier_chain_register(&netlink_chain, nb);
2753 }
2754 EXPORT_SYMBOL(netlink_register_notifier);
2755 
2756 int netlink_unregister_notifier(struct notifier_block *nb)
2757 {
2758         return blocking_notifier_chain_unregister(&netlink_chain, nb);
2759 }
2760 EXPORT_SYMBOL(netlink_unregister_notifier);
2761 
2762 static const struct proto_ops netlink_ops = {
2763         .family =       PF_NETLINK,
2764         .owner =        THIS_MODULE,
2765         .release =      netlink_release,
2766         .bind =         netlink_bind,
2767         .connect =      netlink_connect,
2768         .socketpair =   sock_no_socketpair,
2769         .accept =       sock_no_accept,
2770         .getname =      netlink_getname,
2771         .poll =         datagram_poll,
2772         .ioctl =        netlink_ioctl,
2773         .listen =       sock_no_listen,
2774         .shutdown =     sock_no_shutdown,
2775         .setsockopt =   netlink_setsockopt,
2776         .getsockopt =   netlink_getsockopt,
2777         .sendmsg =      netlink_sendmsg,
2778         .recvmsg =      netlink_recvmsg,
2779         .mmap =         sock_no_mmap,
2780         .sendpage =     sock_no_sendpage,
2781 };
2782 
2783 static const struct net_proto_family netlink_family_ops = {
2784         .family = PF_NETLINK,
2785         .create = netlink_create,
2786         .owner  = THIS_MODULE,  /* for consistency 8) */
2787 };
2788 
2789 static int __net_init netlink_net_init(struct net *net)
2790 {
2791 #ifdef CONFIG_PROC_FS
2792         if (!proc_create_net("netlink", 0, net->proc_net, &netlink_seq_ops,
2793                         sizeof(struct nl_seq_iter)))
2794                 return -ENOMEM;
2795 #endif
2796         return 0;
2797 }
2798 
2799 static void __net_exit netlink_net_exit(struct net *net)
2800 {
2801 #ifdef CONFIG_PROC_FS
2802         remove_proc_entry("netlink", net->proc_net);
2803 #endif
2804 }
2805 
2806 static void __init netlink_add_usersock_entry(void)
2807 {
2808         struct listeners *listeners;
2809         int groups = 32;
2810 
2811         listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2812         if (!listeners)
2813                 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2814 
2815         netlink_table_grab();
2816 
2817         nl_table[NETLINK_USERSOCK].groups = groups;
2818         rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2819         nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2820         nl_table[NETLINK_USERSOCK].registered = 1;
2821         nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
2822 
2823         netlink_table_ungrab();
2824 }
2825 
2826 static struct pernet_operations __net_initdata netlink_net_ops = {
2827         .init = netlink_net_init,
2828         .exit = netlink_net_exit,
2829 };
2830 
2831 static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
2832 {
2833         const struct netlink_sock *nlk = data;
2834         struct netlink_compare_arg arg;
2835 
2836         netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
2837         return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
2838 }
2839 
2840 static const struct rhashtable_params netlink_rhashtable_params = {
2841         .head_offset = offsetof(struct netlink_sock, node),
2842         .key_len = netlink_compare_arg_len,
2843         .obj_hashfn = netlink_hash,
2844         .obj_cmpfn = netlink_compare,
2845         .automatic_shrinking = true,
2846 };
2847 
2848 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2849 BTF_ID_LIST(btf_netlink_sock_id)
2850 BTF_ID(struct, netlink_sock)
2851 
2852 static const struct bpf_iter_seq_info netlink_seq_info = {
2853         .seq_ops                = &netlink_seq_ops,
2854         .init_seq_private       = bpf_iter_init_seq_net,
2855         .fini_seq_private       = bpf_iter_fini_seq_net,
2856         .seq_priv_size          = sizeof(struct nl_seq_iter),
2857 };
2858 
2859 static struct bpf_iter_reg netlink_reg_info = {
2860         .target                 = "netlink",
2861         .ctx_arg_info_size      = 1,
2862         .ctx_arg_info           = {
2863                 { offsetof(struct bpf_iter__netlink, sk),
2864                   PTR_TO_BTF_ID_OR_NULL },
2865         },
2866         .seq_info               = &netlink_seq_info,
2867 };
2868 
2869 static int __init bpf_iter_register(void)
2870 {
2871         netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id;
2872         return bpf_iter_reg_target(&netlink_reg_info);
2873 }
2874 #endif
2875 
2876 static int __init netlink_proto_init(void)
2877 {
2878         int i;
2879         int err = proto_register(&netlink_proto, 0);
2880 
2881         if (err != 0)
2882                 goto out;
2883 
2884 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2885         err = bpf_iter_register();
2886         if (err)
2887                 goto out;
2888 #endif
2889 
2890         BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
2891 
2892         nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2893         if (!nl_table)
2894                 goto panic;
2895 
2896         for (i = 0; i < MAX_LINKS; i++) {
2897                 if (rhashtable_init(&nl_table[i].hash,
2898                                     &netlink_rhashtable_params) < 0) {
2899                         while (--i > 0)
2900                                 rhashtable_destroy(&nl_table[i].hash);
2901                         kfree(nl_table);
2902                         goto panic;
2903                 }
2904         }
2905 
2906         netlink_add_usersock_entry();
2907 
2908         sock_register(&netlink_family_ops);
2909         register_pernet_subsys(&netlink_net_ops);
2910         register_pernet_subsys(&netlink_tap_net_ops);
2911         /* The netlink device handler may be needed early. */
2912         rtnetlink_init();
2913 out:
2914         return err;
2915 panic:
2916         panic("netlink_init: Cannot allocate nl_table\n");
2917 }
2918 
2919 core_initcall(netlink_proto_init);
2920 
