TOMOYO Linux Cross Reference
Linux/net/netlink/af_netlink.c

/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};
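
/* Editorial note (added): masks[] is a variable-length tail; the structure
 * is allocated with enough trailing longs to hold one bit per multicast
 * group of the protocol (see netlink_update_listeners() below).
 */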

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET		0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10
#define NETLINK_F_CAP_ACK		0x20

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
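/* Illustrative sketch (editorial addition, not from the original source):
 * the RCU reader side of this scheme looks like
 *
 *	rcu_read_lock();
 *	sk = __netlink_lookup(table, portid, net);
 *	if (sk)
 *		sock_hold(sk);
 *	rcu_read_unlock();
 *
 * (cf. netlink_lookup() below), while paths that must exclude all readers
 * and writers serialize via netlink_table_grab()/netlink_table_ungrab().
 */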
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}
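
/* Example (editorial addition): group numbering is 1-based on the wire, so
 * group 1 maps to bit 0 (mask 0x1) and group 3 to bit 2 (mask 0x4), while
 * group 0 means "no group" and yields an empty mask.
 */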

static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb_end_offset(skb);
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	memcpy(skb_put(new, len), skb->data, len);
	return new;
}
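
/* Editorial note (added): taps transmit through a regular net_device, so
 * frames whose data lives in an mmap ring or a vmalloc'ed area are first
 * copied into an ordinary linear skb by netlink_to_full_skb() before
 * delivery (see __netlink_deliver_tap_skb() below).
 */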

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);

	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
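
/* Editorial note (added): NETLINK_S_CONGESTED is set in netlink_overrun()
 * when a receiver's queue overflows and is cleared in netlink_rcv_wake()
 * once the queue drains; sockets that set NETLINK_F_RECV_NO_ENOBUFS opt
 * out of the ENOBUFS error report.
 */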

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}


static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}
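
/* Editorial note (added): the swap() calls in __netlink_set_ring() do
 * double duty: they install the new ring parameters and leave the old
 * pg_vec, order and block count behind in the local variables, so the
 * final free_pg_vec() releases exactly the vector that was replaced.
 */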

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}
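
/* Editorial note (added): these two helpers pair memory barriers with a
 * dcache flush of the header page so that nm_status and the frame payload
 * are observed in a consistent order on both sides of the mmap'ed ring.
 */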

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}
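
/* Example (editorial addition): frame_max holds the highest valid index,
 * so in a ring with 8 frames the head advances 0, 1, ..., 7 and then
 * wraps back to 0.
 */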

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, ring->head);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_has_valid_frame(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
			return true;
		pos = pos != 0 ? pos - 1 : ring->frame_max;
	} while (pos != head);

	return false;
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
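
/* Editorial note (added): besides requiring the frame at head to be free,
 * netlink_dump_space() probes the frame about half a ring ahead; a dump
 * may only continue while that probe is still unused, which keeps roughly
 * half the ring available to the consuming process.
 */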

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	/* We could already have received frames in the normal receive
	 * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
	 * so if mask contains pollin/etc already, there's no point
	 * walking the ring.
	 */
	if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (nlk->rx_ring.pg_vec) {
			if (netlink_has_valid_frame(&nlk->rx_ring))
				mask |= POLLIN | POLLRDNORM;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
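
/* Editorial note (added): the listeners bitmap is the bitwise OR of the
 * group masks of every socket bound to this protocol, letting the
 * transmit path test for subscribers without walking mc_list.
 */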

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		sock_put(sk);
		goto err;
	}

	/* We need to ensure that the socket is hashed and visible. */
	smp_wmb();
	nlk_sk(sk)->bound = portid;

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid && nlk->bound) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}
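
/* Editorial note (added): autobind first tries the thread group id as the
 * portid, matching the traditional nl_pid convention; on a collision it
 * picks a random starting point in [S32_MIN, -4097] and counts down, a
 * range that cannot clash with pid-based bindings.
 */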

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test whether the opener of the socket we received the message from had
 * the capability @cap in the user namespace @user_ns when the netlink
 * socket was created, and whether the sender of the message has it as well.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test whether the opener of the socket we received the message from had
 * the capability @cap in the user namespace @user_ns when the netlink
 * socket was created, and whether the sender of the message has it as well.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test whether the opener of the socket we received the message from had
 * the capability @cap in all user namespaces when the netlink socket was
 * created, and whether the sender of the message has it as well.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test whether the opener of the socket we received the message from had
 * the capability @cap over the network namespace of the socket when the
 * netlink socket was created, and whether the sender of the message has
 * it as well.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
 out_unlock:
	netlink_table_ungrab();
	return err;
}

static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	long unsigned int groups = nladdr->nl_groups;
	bool bound;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
1535         if (groups) {
1536                 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1537                         return -EPERM;
1538                 err = netlink_realloc_groups(sk);
1539                 if (err)
1540                         return err;
1541         }
1542 
1543         bound = nlk->bound;
1544         if (bound) {
1545                 /* Ensure nlk->portid is up-to-date. */
1546                 smp_rmb();
1547 
1548                 if (nladdr->nl_pid != nlk->portid)
1549                         return -EINVAL;
1550         }
1551 
1552         if (nlk->netlink_bind && groups) {
1553                 int group;
1554 
1555                 for (group = 0; group < nlk->ngroups; group++) {
1556                         if (!test_bit(group, &groups))
1557                                 continue;
1558                         err = nlk->netlink_bind(net, group + 1);
1559                         if (!err)
1560                                 continue;
1561                         netlink_undo_bind(group, groups, sk);
1562                         return err;
1563                 }
1564         }
1565 
1566         /* No need for barriers here as we return to user-space without
1567          * using any of the bound attributes.
1568          */
1569         if (!bound) {
1570                 err = nladdr->nl_pid ?
1571                         netlink_insert(sk, nladdr->nl_pid) :
1572                         netlink_autobind(sock);
1573                 if (err) {
1574                         netlink_undo_bind(nlk->ngroups, groups, sk);
1575                         return err;
1576                 }
1577         }
1578 
1579         if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1580                 return 0;
1581 
1582         netlink_table_grab();
1583         netlink_update_subscriptions(sk, nlk->subscriptions +
1584                                          hweight32(groups) -
1585                                          hweight32(nlk->groups[0]));
1586         nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
1587         netlink_update_listeners(sk);
1588         netlink_table_ungrab();
1589 
1590         return 0;
1591 }
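
/* Illustrative userspace counterpart (an editorial sketch, not part of this
 * file): binding a route-netlink socket with a non-zero nl_groups mask takes
 * the multicast path in netlink_bind() above. Constants come from
 * <linux/rtnetlink.h>.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,			// 0: let the kernel autobind a portid
 *		.nl_groups = RTMGRP_LINK,	// subscribe to link notifications
 *	};
 *	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *		perror("bind");
 */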
1592 
1593 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1594                            int alen, int flags)
1595 {
1596         int err = 0;
1597         struct sock *sk = sock->sk;
1598         struct netlink_sock *nlk = nlk_sk(sk);
1599         struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1600 
1601         if (alen < sizeof(addr->sa_family))
1602                 return -EINVAL;
1603 
1604         if (addr->sa_family == AF_UNSPEC) {
1605                 sk->sk_state    = NETLINK_UNCONNECTED;
1606                 nlk->dst_portid = 0;
1607                 nlk->dst_group  = 0;
1608                 return 0;
1609         }
1610         if (addr->sa_family != AF_NETLINK)
1611                 return -EINVAL;
1612 
1613         if ((nladdr->nl_groups || nladdr->nl_pid) &&
1614             !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1615                 return -EPERM;
1616 
1617         /* No need for barriers here as we return to user-space without
1618          * using any of the bound attributes.
1619          */
1620         if (!nlk->bound)
1621                 err = netlink_autobind(sock);
1622 
1623         if (err == 0) {
1624                 sk->sk_state    = NETLINK_CONNECTED;
1625                 nlk->dst_portid = nladdr->nl_pid;
1626                 nlk->dst_group  = ffs(nladdr->nl_groups);
1627         }
1628 
1629         return err;
1630 }
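
/* Illustrative userspace sketch (editorial, not part of this file):
 * connect() pins a default destination so later send() calls need no
 * msg_name; connecting with AF_UNSPEC resets the association, as handled
 * above.
 *
 *	struct sockaddr_nl dst = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,		// portid 0: the kernel-side socket
 *	};
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 *	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
 *	connect(fd, &unspec, sizeof(unspec));	// back to NETLINK_UNCONNECTED
 */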
1631 
1632 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1633                            int *addr_len, int peer)
1634 {
1635         struct sock *sk = sock->sk;
1636         struct netlink_sock *nlk = nlk_sk(sk);
1637         DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1638 
1639         nladdr->nl_family = AF_NETLINK;
1640         nladdr->nl_pad = 0;
1641         *addr_len = sizeof(*nladdr);
1642 
1643         if (peer) {
1644                 nladdr->nl_pid = nlk->dst_portid;
1645                 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1646         } else {
1647                 nladdr->nl_pid = nlk->portid;
1648                 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1649         }
1650         return 0;
1651 }
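
/* Illustrative userspace sketch (editorial): after an autobind, the
 * kernel-assigned portid can be recovered via getsockname(), which lands in
 * netlink_getname() above with peer == 0.
 *
 *	struct sockaddr_nl self;
 *	socklen_t len = sizeof(self);
 *
 *	if (getsockname(fd, (struct sockaddr *)&self, &len) == 0)
 *		printf("bound to portid %u\n", self.nl_pid);
 */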
1652 
1653 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1654 {
1655         struct sock *sock;
1656         struct netlink_sock *nlk;
1657 
1658         sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1659         if (!sock)
1660                 return ERR_PTR(-ECONNREFUSED);
1661 
1662         /* Don't bother queuing skb if kernel socket has no input function */
1663         nlk = nlk_sk(sock);
1664         if (sock->sk_state == NETLINK_CONNECTED &&
1665             nlk->dst_portid != nlk_sk(ssk)->portid) {
1666                 sock_put(sock);
1667                 return ERR_PTR(-ECONNREFUSED);
1668         }
1669         return sock;
1670 }
1671 
1672 struct sock *netlink_getsockbyfilp(struct file *filp)
1673 {
1674         struct inode *inode = file_inode(filp);
1675         struct sock *sock;
1676 
1677         if (!S_ISSOCK(inode->i_mode))
1678                 return ERR_PTR(-ENOTSOCK);
1679 
1680         sock = SOCKET_I(inode)->sk;
1681         if (sock->sk_family != AF_NETLINK)
1682                 return ERR_PTR(-EINVAL);
1683 
1684         sock_hold(sock);
1685         return sock;
1686 }
1687 
1688 static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1689                                                int broadcast)
1690 {
1691         struct sk_buff *skb;
1692         void *data;
1693 
1694         if (size <= NLMSG_GOODSIZE || broadcast)
1695                 return alloc_skb(size, GFP_KERNEL);
1696 
1697         size = SKB_DATA_ALIGN(size) +
1698                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1699 
1700         data = vmalloc(size);
1701         if (data == NULL)
1702                 return NULL;
1703 
1704         skb = __build_skb(data, size);
1705         if (skb == NULL)
1706                 vfree(data);
1707         else
1708                 skb->destructor = netlink_skb_destructor;
1709 
1710         return skb;
1711 }
1712 
1713 /*
1714  * Attach a skb to a netlink socket.
1715  * The caller must hold a reference to the destination socket. On error, the
1716  * reference is dropped. The skb is not sent to the destination; only the
1717  * error checks are performed and memory in the queue is reserved.
1718  * Return values:
1719  * < 0: error. skb freed, reference to sock dropped.
1720  * 0: continue
1721  * 1: repeat lookup - reference dropped while waiting for socket memory.
1722  */
1723 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1724                       long *timeo, struct sock *ssk)
1725 {
1726         struct netlink_sock *nlk;
1727 
1728         nlk = nlk_sk(sk);
1729 
1730         if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1731              test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1732             !netlink_skb_is_mmaped(skb)) {
1733                 DECLARE_WAITQUEUE(wait, current);
1734                 if (!*timeo) {
1735                         if (!ssk || netlink_is_kernel(ssk))
1736                                 netlink_overrun(sk);
1737                         sock_put(sk);
1738                         kfree_skb(skb);
1739                         return -EAGAIN;
1740                 }
1741 
1742                 __set_current_state(TASK_INTERRUPTIBLE);
1743                 add_wait_queue(&nlk->wait, &wait);
1744 
1745                 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1746                      test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1747                     !sock_flag(sk, SOCK_DEAD))
1748                         *timeo = schedule_timeout(*timeo);
1749 
1750                 __set_current_state(TASK_RUNNING);
1751                 remove_wait_queue(&nlk->wait, &wait);
1752                 sock_put(sk);
1753 
1754                 if (signal_pending(current)) {
1755                         kfree_skb(skb);
1756                         return sock_intr_errno(*timeo);
1757                 }
1758                 return 1;
1759         }
1760         netlink_skb_set_owner_r(skb, sk);
1761         return 0;
1762 }
1763 
1764 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1765 {
1766         int len = skb->len;
1767 
1768         netlink_deliver_tap(skb);
1769 
1770 #ifdef CONFIG_NETLINK_MMAP
1771         if (netlink_skb_is_mmaped(skb))
1772                 netlink_queue_mmaped_skb(sk, skb);
1773         else if (netlink_rx_is_mmaped(sk))
1774                 netlink_ring_set_copied(sk, skb);
1775         else
1776 #endif /* CONFIG_NETLINK_MMAP */
1777                 skb_queue_tail(&sk->sk_receive_queue, skb);
1778         sk->sk_data_ready(sk);
1779         return len;
1780 }
1781 
1782 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1783 {
1784         int len = __netlink_sendskb(sk, skb);
1785 
1786         sock_put(sk);
1787         return len;
1788 }
1789 
1790 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1791 {
1792         kfree_skb(skb);
1793         sock_put(sk);
1794 }
1795 
1796 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1797 {
1798         int delta;
1799 
1800         WARN_ON(skb->sk != NULL);
1801         if (netlink_skb_is_mmaped(skb))
1802                 return skb;
1803 
1804         delta = skb->end - skb->tail;
1805         if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1806                 return skb;
1807 
1808         if (skb_shared(skb)) {
1809                 struct sk_buff *nskb = skb_clone(skb, allocation);
1810                 if (!nskb)
1811                         return skb;
1812                 consume_skb(skb);
1813                 skb = nskb;
1814         }
1815 
1816         if (!pskb_expand_head(skb, 0, -delta, allocation))
1817                 skb->truesize -= delta;
1818 
1819         return skb;
1820 }
1821 
1822 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1823                                   struct sock *ssk)
1824 {
1825         int ret;
1826         struct netlink_sock *nlk = nlk_sk(sk);
1827 
1828         ret = -ECONNREFUSED;
1829         if (nlk->netlink_rcv != NULL) {
1830                 ret = skb->len;
1831                 netlink_skb_set_owner_r(skb, sk);
1832                 NETLINK_CB(skb).sk = ssk;
1833                 netlink_deliver_tap_kernel(sk, ssk, skb);
1834                 nlk->netlink_rcv(skb);
1835                 consume_skb(skb);
1836         } else {
1837                 kfree_skb(skb);
1838         }
1839         sock_put(sk);
1840         return ret;
1841 }
1842 
1843 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1844                     u32 portid, int nonblock)
1845 {
1846         struct sock *sk;
1847         int err;
1848         long timeo;
1849 
1850         skb = netlink_trim(skb, gfp_any());
1851 
1852         timeo = sock_sndtimeo(ssk, nonblock);
1853 retry:
1854         sk = netlink_getsockbyportid(ssk, portid);
1855         if (IS_ERR(sk)) {
1856                 kfree_skb(skb);
1857                 return PTR_ERR(sk);
1858         }
1859         if (netlink_is_kernel(sk))
1860                 return netlink_unicast_kernel(sk, skb, ssk);
1861 
1862         if (sk_filter(sk, skb)) {
1863                 err = skb->len;
1864                 kfree_skb(skb);
1865                 sock_put(sk);
1866                 return err;
1867         }
1868 
1869         err = netlink_attachskb(sk, skb, &timeo, ssk);
1870         if (err == 1)
1871                 goto retry;
1872         if (err)
1873                 return err;
1874 
1875         return netlink_sendskb(sk, skb);
1876 }
1877 EXPORT_SYMBOL(netlink_unicast);
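
/* Illustrative kernel-side sketch (editorial; my_sk and my_reply are
 * hypothetical names): answering a request by unicasting back to the
 * sender's portid recorded in the skb's control block.
 *
 *	static void my_reply(struct sk_buff *req)
 *	{
 *		struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *		struct nlmsghdr *nlh;
 *
 *		if (!skb)
 *			return;
 *		nlh = nlmsg_put(skb, 0, nlmsg_hdr(req)->nlmsg_seq,
 *				NLMSG_DONE, 0, 0);
 *		if (!nlh) {
 *			kfree_skb(skb);
 *			return;
 *		}
 *		netlink_unicast(my_sk, skb, NETLINK_CB(req).portid,
 *				MSG_DONTWAIT);
 *	}
 */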
1878 
1879 struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
1880                                     unsigned int ldiff, u32 dst_portid,
1881                                     gfp_t gfp_mask)
1882 {
1883 #ifdef CONFIG_NETLINK_MMAP
1884         unsigned int maxlen, linear_size;
1885         struct sock *sk = NULL;
1886         struct sk_buff *skb;
1887         struct netlink_ring *ring;
1888         struct nl_mmap_hdr *hdr;
1889 
1890         sk = netlink_getsockbyportid(ssk, dst_portid);
1891         if (IS_ERR(sk))
1892                 goto out;
1893 
1894         ring = &nlk_sk(sk)->rx_ring;
1895         /* fast-path without atomic ops for common case: non-mmaped receiver */
1896         if (ring->pg_vec == NULL)
1897                 goto out_put;
1898 
1899         /* We need to account for the full linear size needed, as a ring
1900          * slot cannot hold non-linear parts.
1901          */
1902         linear_size = size + ldiff;
1903         if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
1904                 goto out_put;
1905 
1906         skb = alloc_skb_head(gfp_mask);
1907         if (skb == NULL)
1908                 goto err1;
1909 
1910         spin_lock_bh(&sk->sk_receive_queue.lock);
1911         /* check again under lock */
1912         if (ring->pg_vec == NULL)
1913                 goto out_free;
1914 
1915         /* re-check the frame size under the lock */
1916         maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1917         if (maxlen < linear_size)
1918                 goto out_free;
1919 
1920         netlink_forward_ring(ring);
1921         hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1922         if (hdr == NULL)
1923                 goto err2;
1924 
1925         netlink_ring_setup_skb(skb, sk, ring, hdr);
1926         netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1927         atomic_inc(&ring->pending);
1928         netlink_increment_head(ring);
1929 
1930         spin_unlock_bh(&sk->sk_receive_queue.lock);
1931         return skb;
1932 
1933 err2:
1934         kfree_skb(skb);
1935         spin_unlock_bh(&sk->sk_receive_queue.lock);
1936         netlink_overrun(sk);
1937 err1:
1938         sock_put(sk);
1939         return NULL;
1940 
1941 out_free:
1942         kfree_skb(skb);
1943         spin_unlock_bh(&sk->sk_receive_queue.lock);
1944 out_put:
1945         sock_put(sk);
1946 out:
1947 #endif
1948         return alloc_skb(size, gfp_mask);
1949 }
1950 EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
1951 
1952 int netlink_has_listeners(struct sock *sk, unsigned int group)
1953 {
1954         int res = 0;
1955         struct listeners *listeners;
1956 
1957         BUG_ON(!netlink_is_kernel(sk));
1958 
1959         rcu_read_lock();
1960         listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1961 
1962         if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1963                 res = test_bit(group - 1, listeners->masks);
1964 
1965         rcu_read_unlock();
1966 
1967         return res;
1968 }
1969 EXPORT_SYMBOL_GPL(netlink_has_listeners);
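
/* Illustrative sketch (editorial; my_sk and MY_GRP are hypothetical):
 * callers typically use netlink_has_listeners() to skip building an
 * expensive notification when nobody has subscribed.
 *
 *	if (!netlink_has_listeners(my_sk, MY_GRP))
 *		return;
 *	// ... build the message and netlink_broadcast() it ...
 */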
1970 
1971 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1972 {
1973         struct netlink_sock *nlk = nlk_sk(sk);
1974 
1975         if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1976             !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
1977                 netlink_skb_set_owner_r(skb, sk);
1978                 __netlink_sendskb(sk, skb);
1979                 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1980         }
1981         return -1;
1982 }
1983 
1984 struct netlink_broadcast_data {
1985         struct sock *exclude_sk;
1986         struct net *net;
1987         u32 portid;
1988         u32 group;
1989         int failure;
1990         int delivery_failure;
1991         int congested;
1992         int delivered;
1993         gfp_t allocation;
1994         struct sk_buff *skb, *skb2;
1995         int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1996         void *tx_data;
1997 };
1998 
1999 static void do_one_broadcast(struct sock *sk,
2000                                     struct netlink_broadcast_data *p)
2001 {
2002         struct netlink_sock *nlk = nlk_sk(sk);
2003         int val;
2004 
2005         if (p->exclude_sk == sk)
2006                 return;
2007 
2008         if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
2009             !test_bit(p->group - 1, nlk->groups))
2010                 return;
2011 
2012         if (!net_eq(sock_net(sk), p->net)) {
2013                 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
2014                         return;
2015 
2016                 if (!peernet_has_id(sock_net(sk), p->net))
2017                         return;
2018 
2019                 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
2020                                      CAP_NET_BROADCAST))
2021                         return;
2022         }
2023 
2024         if (p->failure) {
2025                 netlink_overrun(sk);
2026                 return;
2027         }
2028 
2029         sock_hold(sk);
2030         if (p->skb2 == NULL) {
2031                 if (skb_shared(p->skb)) {
2032                         p->skb2 = skb_clone(p->skb, p->allocation);
2033                 } else {
2034                         p->skb2 = skb_get(p->skb);
2035                         /*
2036                          * skb ownership may have been set when
2037                          * delivered to a previous socket.
2038                          */
2039                         skb_orphan(p->skb2);
2040                 }
2041         }
2042         if (p->skb2 == NULL) {
2043                 netlink_overrun(sk);
2044                 /* Clone failed. Notify ALL listeners. */
2045                 p->failure = 1;
2046                 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
2047                         p->delivery_failure = 1;
2048                 goto out;
2049         }
2050         if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
2051                 kfree_skb(p->skb2);
2052                 p->skb2 = NULL;
2053                 goto out;
2054         }
2055         if (sk_filter(sk, p->skb2)) {
2056                 kfree_skb(p->skb2);
2057                 p->skb2 = NULL;
2058                 goto out;
2059         }
2060         NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
2061         NETLINK_CB(p->skb2).nsid_is_set = true;
2062         val = netlink_broadcast_deliver(sk, p->skb2);
2063         if (val < 0) {
2064                 netlink_overrun(sk);
2065                 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
2066                         p->delivery_failure = 1;
2067         } else {
2068                 p->congested |= val;
2069                 p->delivered = 1;
2070                 p->skb2 = NULL;
2071         }
2072 out:
2073         sock_put(sk);
2074 }
2075 
2076 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
2077         u32 group, gfp_t allocation,
2078         int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2079         void *filter_data)
2080 {
2081         struct net *net = sock_net(ssk);
2082         struct netlink_broadcast_data info;
2083         struct sock *sk;
2084 
2085         skb = netlink_trim(skb, allocation);
2086 
2087         info.exclude_sk = ssk;
2088         info.net = net;
2089         info.portid = portid;
2090         info.group = group;
2091         info.failure = 0;
2092         info.delivery_failure = 0;
2093         info.congested = 0;
2094         info.delivered = 0;
2095         info.allocation = allocation;
2096         info.skb = skb;
2097         info.skb2 = NULL;
2098         info.tx_filter = filter;
2099         info.tx_data = filter_data;
2100 
2101         /* While we sleep in clone, do not allow the socket list to change */
2102 
2103         netlink_lock_table();
2104 
2105         sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2106                 do_one_broadcast(sk, &info);
2107 
2108         consume_skb(skb);
2109 
2110         netlink_unlock_table();
2111 
2112         if (info.delivery_failure) {
2113                 kfree_skb(info.skb2);
2114                 return -ENOBUFS;
2115         }
2116         consume_skb(info.skb2);
2117 
2118         if (info.delivered) {
2119                 if (info.congested && gfpflags_allow_blocking(allocation))
2120                         yield();
2121                 return 0;
2122         }
2123         return -ESRCH;
2124 }
2125 EXPORT_SYMBOL(netlink_broadcast_filtered);
2126 
2127 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
2128                       u32 group, gfp_t allocation)
2129 {
2130         return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
2131                 NULL, NULL);
2132 }
2133 EXPORT_SYMBOL(netlink_broadcast);
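
/* Illustrative kernel-side sketch (editorial; my_sk and MY_GRP are
 * hypothetical): multicasting an event. A portid of 0 excludes nobody.
 *
 *	struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *
 *	if (skb) {
 *		// ... put an nlmsghdr and payload into skb ...
 *		netlink_broadcast(my_sk, skb, 0, MY_GRP, GFP_KERNEL);
 *	}
 */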
2134 
2135 struct netlink_set_err_data {
2136         struct sock *exclude_sk;
2137         u32 portid;
2138         u32 group;
2139         int code;
2140 };
2141 
2142 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
2143 {
2144         struct netlink_sock *nlk = nlk_sk(sk);
2145         int ret = 0;
2146 
2147         if (sk == p->exclude_sk)
2148                 goto out;
2149 
2150         if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
2151                 goto out;
2152 
2153         if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
2154             !test_bit(p->group - 1, nlk->groups))
2155                 goto out;
2156 
2157         if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
2158                 ret = 1;
2159                 goto out;
2160         }
2161 
2162         sk->sk_err = p->code;
2163         sk->sk_error_report(sk);
2164 out:
2165         return ret;
2166 }
2167 
2168 /**
2169  * netlink_set_err - report error to broadcast listeners
2170  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
2171  * @portid: the PORTID of a process that we want to skip (if any)
2172  * @group: the broadcast group that will notice the error
2173  * @code: error code, must be negative (as usual in kernelspace)
2174  *
2175  * This function returns the number of broadcast listeners that have set the
2176  * NETLINK_NO_ENOBUFS socket option.
2177  */
2178 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
2179 {
2180         struct netlink_set_err_data info;
2181         struct sock *sk;
2182         int ret = 0;
2183 
2184         info.exclude_sk = ssk;
2185         info.portid = portid;
2186         info.group = group;
2187         /* sk->sk_err wants a positive error value */
2188         info.code = -code;
2189 
2190         read_lock(&nl_table_lock);
2191 
2192         sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2193                 ret += do_one_set_err(sk, &info);
2194 
2195         read_unlock(&nl_table_lock);
2196         return ret;
2197 }
2198 EXPORT_SYMBOL(netlink_set_err);
2199 
2200 /* must be called with netlink table grabbed */
2201 static void netlink_update_socket_mc(struct netlink_sock *nlk,
2202                                      unsigned int group,
2203                                      int is_new)
2204 {
2205         int old, new = !!is_new, subscriptions;
2206 
2207         old = test_bit(group - 1, nlk->groups);
2208         subscriptions = nlk->subscriptions - old + new;
2209         if (new)
2210                 __set_bit(group - 1, nlk->groups);
2211         else
2212                 __clear_bit(group - 1, nlk->groups);
2213         netlink_update_subscriptions(&nlk->sk, subscriptions);
2214         netlink_update_listeners(&nlk->sk);
2215 }
2216 
2217 static int netlink_setsockopt(struct socket *sock, int level, int optname,
2218                               char __user *optval, unsigned int optlen)
2219 {
2220         struct sock *sk = sock->sk;
2221         struct netlink_sock *nlk = nlk_sk(sk);
2222         unsigned int val = 0;
2223         int err;
2224 
2225         if (level != SOL_NETLINK)
2226                 return -ENOPROTOOPT;
2227 
2228         if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2229             optlen >= sizeof(int) &&
2230             get_user(val, (unsigned int __user *)optval))
2231                 return -EFAULT;
2232 
2233         switch (optname) {
2234         case NETLINK_PKTINFO:
2235                 if (val)
2236                         nlk->flags |= NETLINK_F_RECV_PKTINFO;
2237                 else
2238                         nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
2239                 err = 0;
2240                 break;
2241         case NETLINK_ADD_MEMBERSHIP:
2242         case NETLINK_DROP_MEMBERSHIP: {
2243                 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
2244                         return -EPERM;
2245                 err = netlink_realloc_groups(sk);
2246                 if (err)
2247                         return err;
2248                 if (!val || val - 1 >= nlk->ngroups)
2249                         return -EINVAL;
2250                 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
2251                         err = nlk->netlink_bind(sock_net(sk), val);
2252                         if (err)
2253                                 return err;
2254                 }
2255                 netlink_table_grab();
2256                 netlink_update_socket_mc(nlk, val,
2257                                          optname == NETLINK_ADD_MEMBERSHIP);
2258                 netlink_table_ungrab();
2259                 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
2260                         nlk->netlink_unbind(sock_net(sk), val);
2261 
2262                 err = 0;
2263                 break;
2264         }
2265         case NETLINK_BROADCAST_ERROR:
2266                 if (val)
2267                         nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
2268                 else
2269                         nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
2270                 err = 0;
2271                 break;
2272         case NETLINK_NO_ENOBUFS:
2273                 if (val) {
2274                         nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
2275                         clear_bit(NETLINK_S_CONGESTED, &nlk->state);
2276                         wake_up_interruptible(&nlk->wait);
2277                 } else {
2278                         nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
2279                 }
2280                 err = 0;
2281                 break;
2282 #ifdef CONFIG_NETLINK_MMAP
2283         case NETLINK_RX_RING:
2284         case NETLINK_TX_RING: {
2285                 struct nl_mmap_req req;
2286 
2287                 /* Rings might consume more memory than the queue limits
2288                  * allow, so require CAP_NET_ADMIN.
2289                  */
2290                 if (!capable(CAP_NET_ADMIN))
2291                         return -EPERM;
2292                 if (optlen < sizeof(req))
2293                         return -EINVAL;
2294                 if (copy_from_user(&req, optval, sizeof(req)))
2295                         return -EFAULT;
2296                 err = netlink_set_ring(sk, &req,
2297                                        optname == NETLINK_TX_RING);
2298                 break;
2299         }
2300 #endif /* CONFIG_NETLINK_MMAP */
2301         case NETLINK_LISTEN_ALL_NSID:
2302                 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
2303                         return -EPERM;
2304 
2305                 if (val)
2306                         nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
2307                 else
2308                         nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
2309                 err = 0;
2310                 break;
2311         case NETLINK_CAP_ACK:
2312                 if (val)
2313                         nlk->flags |= NETLINK_F_CAP_ACK;
2314                 else
2315                         nlk->flags &= ~NETLINK_F_CAP_ACK;
2316                 err = 0;
2317                 break;
2318         default:
2319                 err = -ENOPROTOOPT;
2320         }
2321         return err;
2322 }
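
/* Illustrative userspace sketch (editorial): NETLINK_ADD_MEMBERSHIP is the
 * only way to join groups beyond the 32 representable in sockaddr_nl's
 * nl_groups bitmask, e.g. group 33:
 *
 *	unsigned int grp = 33;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */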
2323 
2324 static int netlink_getsockopt(struct socket *sock, int level, int optname,
2325                               char __user *optval, int __user *optlen)
2326 {
2327         struct sock *sk = sock->sk;
2328         struct netlink_sock *nlk = nlk_sk(sk);
2329         int len, val, err;
2330 
2331         if (level != SOL_NETLINK)
2332                 return -ENOPROTOOPT;
2333 
2334         if (get_user(len, optlen))
2335                 return -EFAULT;
2336         if (len < 0)
2337                 return -EINVAL;
2338 
2339         switch (optname) {
2340         case NETLINK_PKTINFO:
2341                 if (len < sizeof(int))
2342                         return -EINVAL;
2343                 len = sizeof(int);
2344                 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
2345                 if (put_user(len, optlen) ||
2346                     put_user(val, optval))
2347                         return -EFAULT;
2348                 err = 0;
2349                 break;
2350         case NETLINK_BROADCAST_ERROR:
2351                 if (len < sizeof(int))
2352                         return -EINVAL;
2353                 len = sizeof(int);
2354                 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
2355                 if (put_user(len, optlen) ||
2356                     put_user(val, optval))
2357                         return -EFAULT;
2358                 err = 0;
2359                 break;
2360         case NETLINK_NO_ENOBUFS:
2361                 if (len < sizeof(int))
2362                         return -EINVAL;
2363                 len = sizeof(int);
2364                 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
2365                 if (put_user(len, optlen) ||
2366                     put_user(val, optval))
2367                         return -EFAULT;
2368                 err = 0;
2369                 break;
2370         case NETLINK_LIST_MEMBERSHIPS: {
2371                 int pos, idx, shift;
2372 
2373                 err = 0;
2374                 netlink_lock_table();
2375                 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
2376                         if (len - pos < sizeof(u32))
2377                                 break;
2378 
2379                         idx = pos / sizeof(unsigned long);
2380                         shift = (pos % sizeof(unsigned long)) * 8;
2381                         if (put_user((u32)(nlk->groups[idx] >> shift),
2382                                      (u32 __user *)(optval + pos))) {
2383                                 err = -EFAULT;
2384                                 break;
2385                         }
2386                 }
2387                 if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
2388                         err = -EFAULT;
2389                 netlink_unlock_table();
2390                 break;
2391         }
2392         case NETLINK_CAP_ACK:
2393                 if (len < sizeof(int))
2394                         return -EINVAL;
2395                 len = sizeof(int);
2396                 val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
2397                 if (put_user(len, optlen) ||
2398                     put_user(val, optval))
2399                         return -EFAULT;
2400                 err = 0;
2401                 break;
2402         default:
2403                 err = -ENOPROTOOPT;
2404         }
2405         return err;
2406 }
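
/* Illustrative userspace sketch (editorial): NETLINK_LIST_MEMBERSHIPS as
 * implemented above always writes back the required size, so a first call
 * with a zero length can be used to size the buffer.
 *
 *	socklen_t len = 0;
 *	uint32_t *groups;
 *
 *	getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &len);
 *	groups = malloc(len);
 *	getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, groups, &len);
 */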
2407 
2408 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2409 {
2410         struct nl_pktinfo info;
2411 
2412         info.group = NETLINK_CB(skb).dst_group;
2413         put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2414 }
2415 
2416 static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
2417                                          struct sk_buff *skb)
2418 {
2419         if (!NETLINK_CB(skb).nsid_is_set)
2420                 return;
2421 
2422         put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
2423                  &NETLINK_CB(skb).nsid);
2424 }
2425 
2426 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2427 {
2428         struct sock *sk = sock->sk;
2429         struct netlink_sock *nlk = nlk_sk(sk);
2430         DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2431         u32 dst_portid;
2432         u32 dst_group;
2433         struct sk_buff *skb;
2434         int err;
2435         struct scm_cookie scm;
2436         u32 netlink_skb_flags = 0;
2437 
2438         if (msg->msg_flags&MSG_OOB)
2439                 return -EOPNOTSUPP;
2440 
2441         err = scm_send(sock, msg, &scm, true);
2442         if (err < 0)
2443                 return err;
2444 
2445         if (msg->msg_namelen) {
2446                 err = -EINVAL;
2447                 if (addr->nl_family != AF_NETLINK)
2448                         goto out;
2449                 dst_portid = addr->nl_pid;
2450                 dst_group = ffs(addr->nl_groups);
2451                 err = -EPERM;
2452                 if ((dst_group || dst_portid) &&
2453                     !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
2454                         goto out;
2455                 netlink_skb_flags |= NETLINK_SKB_DST;
2456         } else {
2457                 dst_portid = nlk->dst_portid;
2458                 dst_group = nlk->dst_group;
2459         }
2460 
2461         if (!nlk->bound) {
2462                 err = netlink_autobind(sock);
2463                 if (err)
2464                         goto out;
2465         } else {
2466                 /* Ensure nlk is hashed and visible. */
2467                 smp_rmb();
2468         }
2469 
2470         /* It's a really convoluted way for userland to ask for mmaped
2471          * sendmsg(), but that's what we've got...
2472          */
2473         if (netlink_tx_is_mmaped(sk) &&
2474             iter_is_iovec(&msg->msg_iter) &&
2475             msg->msg_iter.nr_segs == 1 &&
2476             msg->msg_iter.iov->iov_base == NULL) {
2477                 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2478                                            &scm);
2479                 goto out;
2480         }
2481 
2482         err = -EMSGSIZE;
2483         if (len > sk->sk_sndbuf - 32)
2484                 goto out;
2485         err = -ENOBUFS;
2486         skb = netlink_alloc_large_skb(len, dst_group);
2487         if (skb == NULL)
2488                 goto out;
2489 
2490         NETLINK_CB(skb).portid  = nlk->portid;
2491         NETLINK_CB(skb).dst_group = dst_group;
2492         NETLINK_CB(skb).creds   = scm.creds;
2493         NETLINK_CB(skb).flags   = netlink_skb_flags;
2494 
2495         err = -EFAULT;
2496         if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
2497                 kfree_skb(skb);
2498                 goto out;
2499         }
2500 
2501         err = security_netlink_send(sk, skb);
2502         if (err) {
2503                 kfree_skb(skb);
2504                 goto out;
2505         }
2506 
2507         if (dst_group) {
2508                 atomic_inc(&skb->users);
2509                 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
2510         }
2511         err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
2512 
2513 out:
2514         scm_destroy(&scm);
2515         return err;
2516 }
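
/* Illustrative userspace sketch (editorial): the sendmsg() path above just
 * sees nlmsghdr-framed bytes; e.g. a minimal RTM_GETLINK dump request on a
 * NETLINK_ROUTE socket:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtgenmsg g;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 *		.nlh.nlmsg_type	 = RTM_GETLINK,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.g.rtgen_family	 = AF_UNSPEC,
 *	};
 *
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 */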
2517 
2518 static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2519                            int flags)
2520 {
2521         struct scm_cookie scm;
2522         struct sock *sk = sock->sk;
2523         struct netlink_sock *nlk = nlk_sk(sk);
2524         int noblock = flags&MSG_DONTWAIT;
2525         size_t copied;
2526         struct sk_buff *skb, *data_skb;
2527         int err, ret;
2528 
2529         if (flags&MSG_OOB)
2530                 return -EOPNOTSUPP;
2531 
2532         copied = 0;
2533 
2534         skb = skb_recv_datagram(sk, flags, noblock, &err);
2535         if (skb == NULL)
2536                 goto out;
2537 
2538         data_skb = skb;
2539 
2540 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2541         if (unlikely(skb_shinfo(skb)->frag_list)) {
2542                 /*
2543                  * If this skb has a frag_list, then here that means that we
2544                  * will have to use the frag_list skb's data for compat tasks
2545                  * and the regular skb's data for normal (non-compat) tasks.
2546                  *
2547                  * If we need to send the compat skb, assign it to the
2548                  * 'data_skb' variable so that it will be used below for data
2549                  * copying. We keep 'skb' for everything else, including
2550                  * freeing both later.
2551                  */
2552                 if (flags & MSG_CMSG_COMPAT)
2553                         data_skb = skb_shinfo(skb)->frag_list;
2554         }
2555 #endif
2556 
2557         /* Record the max length of recvmsg() calls for future allocations */
2558         nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2559         nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2560                                      16384);
2561 
2562         copied = data_skb->len;
2563         if (len < copied) {
2564                 msg->msg_flags |= MSG_TRUNC;
2565                 copied = len;
2566         }
2567 
2568         skb_reset_transport_header(data_skb);
2569         err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
2570 
2571         if (msg->msg_name) {
2572                 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
2573                 addr->nl_family = AF_NETLINK;
2574                 addr->nl_pad    = 0;
2575                 addr->nl_pid    = NETLINK_CB(skb).portid;
2576                 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
2577                 msg->msg_namelen = sizeof(*addr);
2578         }
2579 
2580         if (nlk->flags & NETLINK_F_RECV_PKTINFO)
2581                 netlink_cmsg_recv_pktinfo(msg, skb);
2582         if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
2583                 netlink_cmsg_listen_all_nsid(sk, msg, skb);
2584 
2585         memset(&scm, 0, sizeof(scm));
2586         scm.creds = *NETLINK_CREDS(skb);
2587         if (flags & MSG_TRUNC)
2588                 copied = data_skb->len;
2589 
2590         skb_free_datagram(sk, skb);
2591 
2592         if (nlk->cb_running &&
2593             atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2594                 ret = netlink_dump(sk);
2595                 if (ret) {
2596                         sk->sk_err = -ret;
2597                         sk->sk_error_report(sk);
2598                 }
2599         }
2600 
2601         scm_recv(sock, msg, &scm, flags);
2602 out:
2603         netlink_rcv_wake(sk);
2604         return err ? : copied;
2605 }
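
/* Illustrative userspace sketch (editorial): draining messages delivered by
 * the recvmsg() path above, walking them with the NLMSG_* macros from
 * <linux/netlink.h>:
 *
 *	char buf[16384];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct nlmsghdr *nlh;
 *
 *	for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, n);
 *	     nlh = NLMSG_NEXT(nlh, n)) {
 *		if (nlh->nlmsg_type == NLMSG_DONE)
 *			break;		// end of a dump
 *		if (nlh->nlmsg_type == NLMSG_ERROR)
 *			break;		// nlmsgerr payload carries the errno
 *		// ... handle the message ...
 *	}
 */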
2606 
2607 static void netlink_data_ready(struct sock *sk)
2608 {
2609         BUG();
2610 }
2611 
2612 /*
2613  *      We export these functions to other modules. They provide a
2614  *      complete set of non-blocking kernel support for message
2615  *      queueing.
2616  */
2617 
2618 struct sock *
2619 __netlink_kernel_create(struct net *net, int unit, struct module *module,
2620                         struct netlink_kernel_cfg *cfg)
2621 {
2622         struct socket *sock;
2623         struct sock *sk;
2624         struct netlink_sock *nlk;
2625         struct listeners *listeners = NULL;
2626         struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2627         unsigned int groups;
2628 
2629         BUG_ON(!nl_table);
2630 
2631         if (unit < 0 || unit >= MAX_LINKS)
2632                 return NULL;
2633 
2634         if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2635                 return NULL;
2636 
2637         if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
2638                 goto out_sock_release_nosk;
2639 
2640         sk = sock->sk;
2641 
2642         if (!cfg || cfg->groups < 32)
2643                 groups = 32;
2644         else
2645                 groups = cfg->groups;
2646 
2647         listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2648         if (!listeners)
2649                 goto out_sock_release;
2650 
2651         sk->sk_data_ready = netlink_data_ready;
2652         if (cfg && cfg->input)
2653                 nlk_sk(sk)->netlink_rcv = cfg->input;
2654 
2655         if (netlink_insert(sk, 0))
2656                 goto out_sock_release;
2657 
2658         nlk = nlk_sk(sk);
2659         nlk->flags |= NETLINK_F_KERNEL_SOCKET;
2660 
2661         netlink_table_grab();
2662         if (!nl_table[unit].registered) {
2663                 nl_table[unit].groups = groups;
2664                 rcu_assign_pointer(nl_table[unit].listeners, listeners);
2665                 nl_table[unit].cb_mutex = cb_mutex;
2666                 nl_table[unit].module = module;
2667                 if (cfg) {
2668                         nl_table[unit].bind = cfg->bind;
2669                         nl_table[unit].unbind = cfg->unbind;
2670                         nl_table[unit].flags = cfg->flags;
2671                         if (cfg->compare)
2672                                 nl_table[unit].compare = cfg->compare;
2673                 }
2674                 nl_table[unit].registered = 1;
2675         } else {
2676                 kfree(listeners);
2677                 nl_table[unit].registered++;
2678         }
2679         netlink_table_ungrab();
2680         return sk;
2681 
2682 out_sock_release:
2683         kfree(listeners);
2684         netlink_kernel_release(sk);
2685         return NULL;
2686 
2687 out_sock_release_nosk:
2688         sock_release(sock);
2689         return NULL;
2690 }
2691 EXPORT_SYMBOL(__netlink_kernel_create);
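
/* Illustrative kernel-side sketch (editorial; MY_PROTO is a hypothetical
 * unit below MAX_LINKS and my_msg_handler a hypothetical callback): most
 * users go through the netlink_kernel_create() wrapper and supply an input
 * hook via the cfg.
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_msg_handler);
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.input = my_input,
 *	};
 *	struct sock *my_sk = netlink_kernel_create(&init_net, MY_PROTO, &cfg);
 */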
2692 
2693 void
2694 netlink_kernel_release(struct sock *sk)
2695 {
2696         if (sk == NULL || sk->sk_socket == NULL)
2697                 return;
2698 
2699         sock_release(sk->sk_socket);
2700 }
2701 EXPORT_SYMBOL(netlink_kernel_release);
2702 
2703 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2704 {
2705         struct listeners *new, *old;
2706         struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2707 
2708         if (groups < 32)
2709                 groups = 32;
2710 
2711         if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2712                 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2713                 if (!new)
2714                         return -ENOMEM;
2715                 old = nl_deref_protected(tbl->listeners);
2716                 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2717                 rcu_assign_pointer(tbl->listeners, new);
2718 
2719                 kfree_rcu(old, rcu);
2720         }
2721         tbl->groups = groups;
2722 
2723         return 0;
2724 }
2725 
2726 /**
2727  * netlink_change_ngroups - change number of multicast groups
2728  *
2729  * This changes the number of multicast groups that are available
2730  * on a certain netlink family. Note that it is not possible to
2731  * change the number of groups to below 32. Also note that it does
2732  * not implicitly call netlink_clear_multicast_users() when the
2733  * number of groups is reduced.
2734  *
2735  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2736  * @groups: The new number of groups.
2737  */
2738 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2739 {
2740         int err;
2741 
2742         netlink_table_grab();
2743         err = __netlink_change_ngroups(sk, groups);
2744         netlink_table_ungrab();
2745 
2746         return err;
2747 }
2748 
2749 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2750 {
2751         struct sock *sk;
2752         struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2753 
2754         sk_for_each_bound(sk, &tbl->mc_list)
2755                 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2756 }
2757 
2758 struct nlmsghdr *
2759 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2760 {
2761         struct nlmsghdr *nlh;
2762         int size = nlmsg_msg_size(len);
2763 
2764         nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2765         nlh->nlmsg_type = type;
2766         nlh->nlmsg_len = size;
2767         nlh->nlmsg_flags = flags;
2768         nlh->nlmsg_pid = portid;
2769         nlh->nlmsg_seq = seq;
2770         if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2771                 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2772         return nlh;
2773 }
2774 EXPORT_SYMBOL(__nlmsg_put);
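
/* Illustrative kernel-side sketch (editorial; MY_MSG_TYPE and struct my_hdr
 * are hypothetical): higher-level code normally builds messages with the
 * nlmsg_put()/nlmsg_end() wrappers around __nlmsg_put().
 *
 *	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE,
 *			sizeof(struct my_hdr), 0);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	// ... fill nlmsg_data(nlh), append nla_put() attributes ...
 *	nlmsg_end(skb, nlh);
 */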
2775 
2776 /*
2777  * It looks a bit ugly.
2778  * It would be better to create a kernel thread.
2779  */
2780 
2781 static int netlink_dump(struct sock *sk)
2782 {
2783         struct netlink_sock *nlk = nlk_sk(sk);
2784         struct netlink_callback *cb;
2785         struct sk_buff *skb = NULL;
2786         struct nlmsghdr *nlh;
2787         int len, err = -ENOBUFS;
2788         int alloc_min_size;
2789         int alloc_size;
2790 
2791         mutex_lock(nlk->cb_mutex);
2792         if (!nlk->cb_running) {
2793                 err = -EINVAL;
2794                 goto errout_skb;
2795         }
2796 
2797         if (!netlink_rx_is_mmaped(sk) &&
2798             atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2799                 goto errout_skb;
2800 
2801         /* NLMSG_GOODSIZE is small to avoid high-order allocations being
2802          * required, but it makes sense to _attempt_ a 16K allocation
2803          * to reduce the number of system calls on dump operations, if the
2804          * user ever provided a big enough buffer.
2805          */
2806         cb = &nlk->cb;
2807         alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2808 
2809         if (alloc_min_size < nlk->max_recvmsg_len) {
2810                 alloc_size = nlk->max_recvmsg_len;
2811                 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2812                                         GFP_KERNEL |
2813                                         __GFP_NOWARN |
2814                                         __GFP_NORETRY);
2815         }
2816         if (!skb) {
2817                 alloc_size = alloc_min_size;
2818                 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2819                                         GFP_KERNEL);
2820         }
2821         if (!skb)
2822                 goto errout_skb;
2823 
2824         /* Trim skb to the allocated size. The user is expected to provide a
2825          * buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len is
2826          * capped in netlink_recvmsg())). The dump will pack as many smaller
2827          * messages as will fit within the allocated skb. skb is typically
2828          * allocated with larger space than required (it can be nearly 2x the
2829          * requested size after rounding up to the next power of 2). Allowing
2830          * the dump to use the excess space makes it difficult for a user to
2831          * size a reasonable static buffer based on the expected largest dump
2832          * of a single netdev. The outcome would be a MSG_TRUNC error.
2833          */
2834         if (!netlink_rx_is_mmaped(sk))
2835                 skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2836         netlink_skb_set_owner_r(skb, sk);
2837 
2838         len = cb->dump(skb, cb);
2839 
2840         if (len > 0) {
2841                 mutex_unlock(nlk->cb_mutex);
2842 
2843                 if (sk_filter(sk, skb))
2844                         kfree_skb(skb);
2845                 else
2846                         __netlink_sendskb(sk, skb);
2847                 return 0;
2848         }
2849 
2850         nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2851         if (!nlh)
2852                 goto errout_skb;
2853 
2854         nl_dump_check_consistent(cb, nlh);
2855 
2856         memcpy(nlmsg_data(nlh), &len, sizeof(len));
2857 
2858         if (sk_filter(sk, skb))
2859                 kfree_skb(skb);
2860         else
2861                 __netlink_sendskb(sk, skb);
2862 
2863         if (cb->done)
2864                 cb->done(cb);
2865 
2866         nlk->cb_running = false;
2867         mutex_unlock(nlk->cb_mutex);
2868         module_put(cb->module);
2869         consume_skb(cb->skb);
2870         return 0;
2871 
2872 errout_skb:
2873         mutex_unlock(nlk->cb_mutex);
2874         kfree_skb(skb);
2875         return err;
2876 }
2877 
2878 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2879                          const struct nlmsghdr *nlh,
2880                          struct netlink_dump_control *control)
2881 {
2882         struct netlink_callback *cb;
2883         struct sock *sk;
2884         struct netlink_sock *nlk;
2885         int ret;
2886 
2887         /* Memory mapped dump requests need to be copied to avoid looping
2888          * on the pending state in netlink_mmap_sendmsg() while the CB holds
2889          * a reference to the skb.
2890          */
2891         if (netlink_skb_is_mmaped(skb)) {
2892                 skb = skb_copy(skb, GFP_KERNEL);
2893                 if (skb == NULL)
2894                         return -ENOBUFS;
2895         } else
2896                 atomic_inc(&skb->users);
2897 
2898         sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2899         if (sk == NULL) {
2900                 ret = -ECONNREFUSED;
2901                 goto error_free;
2902         }
2903 
2904         nlk = nlk_sk(sk);
2905         mutex_lock(nlk->cb_mutex);
2906         /* A dump is in progress... */
2907         if (nlk->cb_running) {
2908                 ret = -EBUSY;
2909                 goto error_unlock;
2910         }
2911         /* take a reference on the module that cb->dump belongs to */
2912         if (!try_module_get(control->module)) {
2913                 ret = -EPROTONOSUPPORT;
2914                 goto error_unlock;
2915         }
2916 
2917         cb = &nlk->cb;
2918         memset(cb, 0, sizeof(*cb));
2919         cb->start = control->start;
2920         cb->dump = control->dump;
2921         cb->done = control->done;
2922         cb->nlh = nlh;
2923         cb->data = control->data;
2924         cb->module = control->module;
2925         cb->min_dump_alloc = control->min_dump_alloc;
2926         cb->skb = skb;
2927 
2928         nlk->cb_running = true;
2929 
2930         mutex_unlock(nlk->cb_mutex);
2931 
2932         if (cb->start)
2933                 cb->start(cb);
2934 
2935         ret = netlink_dump(sk);
2936         sock_put(sk);
2937 
2938         if (ret)
2939                 return ret;
2940 
2941         /* We successfully started a dump; by returning -EINTR we
2942          * signal that no ACK should be sent even if it was requested.
2943          */
2944         return -EINTR;
2945 
2946 error_unlock:
2947         sock_put(sk);
2948         mutex_unlock(nlk->cb_mutex);
2949 error_free:
2950         kfree_skb(skb);
2951         return ret;
2952 }
2953 EXPORT_SYMBOL(__netlink_dump_start);
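
/* Illustrative kernel-side sketch (editorial; my_dump_fn and my_sk are
 * hypothetical): request handlers start dumps through the
 * netlink_dump_start() wrapper. The dump callback returns > 0 while more
 * data is pending and 0 when done, matching the loop in netlink_dump()
 * above.
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump_fn,
 *		};
 *		return netlink_dump_start(my_sk, skb, nlh, &c);
 *	}
 */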
2954 
2955 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2956 {
2957         struct sk_buff *skb;
2958         struct nlmsghdr *rep;
2959         struct nlmsgerr *errmsg;
2960         size_t payload = sizeof(*errmsg);
2961         struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2962 
2963         /* Error messages get the original request appended, unless the user
2964          * requests that the error message be capped.
2965          */
2966         if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
2967                 payload += nlmsg_len(nlh);
2968 
2969         skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2970                                 NETLINK_CB(in_skb).portid, GFP_KERNEL);
2971         if (!skb) {
2972                 struct sock *sk;
2973 
2974                 sk = netlink_lookup(sock_net(in_skb->sk),
2975                                     in_skb->sk->sk_protocol,
2976                                     NETLINK_CB(in_skb).portid);
2977                 if (sk) {
2978                         sk->sk_err = ENOBUFS;
2979                         sk->sk_error_report(sk);
2980                         sock_put(sk);
2981                 }
2982                 return;
2983         }
2984 
2985         rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2986                           NLMSG_ERROR, payload, 0);
2987         errmsg = nlmsg_data(rep);
2988         errmsg->error = err;
2989         memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
2990         netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2991 }
2992 EXPORT_SYMBOL(netlink_ack);
2993 
2994 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2995                                                      struct nlmsghdr *))
2996 {
2997         struct nlmsghdr *nlh;
2998         int err;
2999 
3000         while (skb->len >= nlmsg_total_size(0)) {
3001                 int msglen;
3002 
3003                 nlh = nlmsg_hdr(skb);
3004                 err = 0;
3005 
3006                 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
3007                         return 0;
3008 
3009                 /* Only requests are handled by the kernel */
3010                 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
3011                         goto ack;
3012 
3013                 /* Skip control messages */
3014                 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
3015                         goto ack;
3016 
3017                 err = cb(skb, nlh);
3018                 if (err == -EINTR)
3019                         goto skip;
3020 
3021 ack:
3022                 if (nlh->nlmsg_flags & NLM_F_ACK || err)
3023                         netlink_ack(skb, nlh, err);
3024 
3025 skip:
3026                 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
3027                 if (msglen > skb->len)
3028                         msglen = skb->len;
3029                 skb_pull(skb, msglen);
3030         }
3031 
3032         return 0;
3033 }
3034 EXPORT_SYMBOL(netlink_rcv_skb);
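
/* Illustrative sketch (editorial; MY_MSG_TYPE and process() are
 * hypothetical): the callback handed to netlink_rcv_skb() sees one
 * validated request per call; a non-zero return is reported back to the
 * sender through netlink_ack() above.
 *
 *	static int my_msg_handler(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		case MY_MSG_TYPE:
 *			return process(nlmsg_data(nlh), nlmsg_len(nlh));
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */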
3035 
3036 /**
3037  * nlmsg_notify - send a notification netlink message
3038  * @sk: netlink socket to use
3039  * @skb: notification message
3040  * @portid: destination netlink portid for reports or 0
3041  * @group: destination multicast group or 0
3042  * @report: 1 to report back, 0 to disable
3043  * @flags: allocation flags
3044  */
3045 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
3046                  unsigned int group, int report, gfp_t flags)
3047 {
3048         int err = 0;
3049 
3050         if (group) {
3051                 int exclude_portid = 0;
3052 
3053                 if (report) {
3054                         atomic_inc(&skb->users);
3055                         exclude_portid = portid;
3056                 }
3057 
3058                 /* errors are reported via destination sk->sk_err; propagate
3059                  * delivery errors if the NETLINK_BROADCAST_ERROR flag is set */
3060                 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
3061         }
3062 
3063         if (report) {
3064                 int err2;
3065 
3066                 err2 = nlmsg_unicast(sk, skb, portid);
3067                 if (!err || err == -ESRCH)
3068                         err = err2;
3069         }
3070 
3071         return err;
3072 }
3073 EXPORT_SYMBOL(nlmsg_notify);
3074 
3075 #ifdef CONFIG_PROC_FS
3076 struct nl_seq_iter {
3077         struct seq_net_private p;
3078         struct rhashtable_iter hti;
3079         int link;
3080 };
3081 
3082 static int netlink_walk_start(struct nl_seq_iter *iter)
3083 {
3084         int err;
3085 
3086         err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
3087         if (err) {
3088                 iter->link = MAX_LINKS;
3089                 return err;
3090         }
3091 
3092         err = rhashtable_walk_start(&iter->hti);
3093         return err == -EAGAIN ? 0 : err;
3094 }
3095 
3096 static void netlink_walk_stop(struct nl_seq_iter *iter)
3097 {
3098         rhashtable_walk_stop(&iter->hti);
3099         rhashtable_walk_exit(&iter->hti);
3100 }
3101 
3102 static void *__netlink_seq_next(struct seq_file *seq)
3103 {
3104         struct nl_seq_iter *iter = seq->private;
3105         struct netlink_sock *nlk;
3106 
3107         do {
3108                 for (;;) {
3109                         int err;
3110 
3111                         nlk = rhashtable_walk_next(&iter->hti);
3112 
3113                         if (IS_ERR(nlk)) {
3114                                 if (PTR_ERR(nlk) == -EAGAIN)
3115                                         continue;
3116 
3117                                 return nlk;
3118                         }
3119 
3120                         if (nlk)
3121                                 break;
3122 
3123                         netlink_walk_stop(iter);
3124                         if (++iter->link >= MAX_LINKS)
3125                                 return NULL;
3126 
3127                         err = netlink_walk_start(iter);
3128                         if (err)
3129                                 return ERR_PTR(err);
3130                 }
3131         } while (sock_net(&nlk->sk) != seq_file_net(seq));
3132 
3133         return nlk;
3134 }
3135 
3136 static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3137 {
3138         struct nl_seq_iter *iter = seq->private;
3139         void *obj = SEQ_START_TOKEN;
3140         loff_t pos;
3141         int err;
3142 
3143         iter->link = 0;
3144 
3145         err = netlink_walk_start(iter);
3146         if (err)
3147                 return ERR_PTR(err);
3148 
3149         for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3150                 obj = __netlink_seq_next(seq);
3151 
3152         return obj;
3153 }
3154 
3155 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3156 {
3157         ++*pos;
3158         return __netlink_seq_next(seq);
3159 }
3160 
3161 static void netlink_seq_stop(struct seq_file *seq, void *v)
3162 {
3163         struct nl_seq_iter *iter = seq->private;
3164 
3165         if (iter->link >= MAX_LINKS)
3166                 return;
3167 
3168         netlink_walk_stop(iter);
3169 }
3170 
3171 
3172 static int netlink_seq_show(struct seq_file *seq, void *v)
3173 {
3174         if (v == SEQ_START_TOKEN) {
3175                 seq_puts(seq,
3176                          "sk       Eth Pid    Groups   "
3177                          "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
3178         } else {
3179                 struct sock *s = v;
3180                 struct netlink_sock *nlk = nlk_sk(s);
3181 
3182                 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
3183                            s,
3184                            s->sk_protocol,
3185                            nlk->portid,
3186                            nlk->groups ? (u32)nlk->groups[0] : 0,
3187                            sk_rmem_alloc_get(s),
3188                            sk_wmem_alloc_get(s),
3189                            nlk->cb_running,
3190                            atomic_read(&s->sk_refcnt),
3191                            atomic_read(&s->sk_drops),
3192                            sock_i_ino(s)
3193                         );
3194 
3195         }
3196         return 0;
3197 }
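/*
 * Given the format string above, a line of /proc/net/netlink comes out
 * roughly as below (values purely illustrative, and %pK may print
 * zeroes depending on kptr_restrict):
 *
 *   sk       Eth Pid    Groups   Rmem     Wmem     Dump     Locks     Drops     Inode
 *   ffff8800b4f2a000 0   1234   00000551 0        0        0 2        0        4893
 */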
3198 
3199 static const struct seq_operations netlink_seq_ops = {
3200         .start  = netlink_seq_start,
3201         .next   = netlink_seq_next,
3202         .stop   = netlink_seq_stop,
3203         .show   = netlink_seq_show,
3204 };
3205 
3206 
3207 static int netlink_seq_open(struct inode *inode, struct file *file)
3208 {
3209         return seq_open_net(inode, file, &netlink_seq_ops,
3210                                 sizeof(struct nl_seq_iter));
3211 }
3212 
3213 static const struct file_operations netlink_seq_fops = {
3214         .owner          = THIS_MODULE,
3215         .open           = netlink_seq_open,
3216         .read           = seq_read,
3217         .llseek         = seq_lseek,
3218         .release        = seq_release_net,
3219 };
3220 
3221 #endif
3222 
3223 int netlink_register_notifier(struct notifier_block *nb)
3224 {
3225         return atomic_notifier_chain_register(&netlink_chain, nb);
3226 }
3227 EXPORT_SYMBOL(netlink_register_notifier);
3228 
3229 int netlink_unregister_notifier(struct notifier_block *nb)
3230 {
3231         return atomic_notifier_chain_unregister(&netlink_chain, nb);
3232 }
3233 EXPORT_SYMBOL(netlink_unregister_notifier);
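/*
 * This chain fires NETLINK_URELEASE when a bound socket is released
 * (see netlink_release() earlier in this file); subsystems use it to
 * drop state keyed on a portid.  A sketch of a subscriber, with
 * hypothetical names:
 */
static int my_netlink_event(struct notifier_block *nb,
                            unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        /* Hypothetical per-portid cleanup. */
        if (event == NETLINK_URELEASE && n->protocol == NETLINK_USERSOCK)
                pr_debug("portid %u released\n", n->portid);

        return NOTIFY_DONE;
}

static struct notifier_block my_netlink_notifier = {
        .notifier_call = my_netlink_event,
};

/* Paired netlink_register_notifier(&my_netlink_notifier) /
 * netlink_unregister_notifier() calls would go in module init/exit. */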
3234 
3235 static const struct proto_ops netlink_ops = {
3236         .family =       PF_NETLINK,
3237         .owner =        THIS_MODULE,
3238         .release =      netlink_release,
3239         .bind =         netlink_bind,
3240         .connect =      netlink_connect,
3241         .socketpair =   sock_no_socketpair,
3242         .accept =       sock_no_accept,
3243         .getname =      netlink_getname,
3244         .poll =         netlink_poll,
3245         .ioctl =        sock_no_ioctl,
3246         .listen =       sock_no_listen,
3247         .shutdown =     sock_no_shutdown,
3248         .setsockopt =   netlink_setsockopt,
3249         .getsockopt =   netlink_getsockopt,
3250         .sendmsg =      netlink_sendmsg,
3251         .recvmsg =      netlink_recvmsg,
3252         .mmap =         netlink_mmap,
3253         .sendpage =     sock_no_sendpage,
3254 };
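/*
 * Each handler above backs the matching syscall on an AF_NETLINK
 * socket, with the sock_no_*() stubs failing cleanly for operations
 * netlink does not support (listen, accept, ...).  For orientation, a
 * hypothetical user-space counterpart (not kernel code) whose calls
 * end up in netlink_create() and netlink_bind():
 */
#include <linux/netlink.h>
#include <sys/socket.h>
#include <unistd.h>

int open_usersock(void)
{
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);

        if (fd < 0)
                return -1;
        /* nl_pid == 0 lets netlink_autobind() pick a unique portid. */
        if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}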
3255 
3256 static const struct net_proto_family netlink_family_ops = {
3257         .family = PF_NETLINK,
3258         .create = netlink_create,
3259         .owner  = THIS_MODULE,  /* for consistency 8) */
3260 };
3261 
3262 static int __net_init netlink_net_init(struct net *net)
3263 {
3264 #ifdef CONFIG_PROC_FS
3265         if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3266                 return -ENOMEM;
3267 #endif
3268         return 0;
3269 }
3270 
3271 static void __net_exit netlink_net_exit(struct net *net)
3272 {
3273 #ifdef CONFIG_PROC_FS
3274         remove_proc_entry("netlink", net->proc_net);
3275 #endif
3276 }
3277 
3278 static void __init netlink_add_usersock_entry(void)
3279 {
3280         struct listeners *listeners;
3281         int groups = 32;
3282 
3283         listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
3284         if (!listeners)
3285                 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3286 
3287         netlink_table_grab();
3288 
3289         nl_table[NETLINK_USERSOCK].groups = groups;
3290         rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3291         nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3292         nl_table[NETLINK_USERSOCK].registered = 1;
3293         nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3294 
3295         netlink_table_ungrab();
3296 }
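/*
 * Only NL_CFG_F_NONROOT_SEND is set here, so unprivileged processes
 * may transmit over NETLINK_USERSOCK (including to its 32 multicast
 * groups), while subscribing to those groups for receive still runs
 * into the NL_CFG_F_NONROOT_RECV check in netlink_bind() and thus
 * normally requires CAP_NET_ADMIN.
 */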
3297 
3298 static struct pernet_operations __net_initdata netlink_net_ops = {
3299         .init = netlink_net_init,
3300         .exit = netlink_net_exit,
3301 };
3302 
3303 static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
3304 {
3305         const struct netlink_sock *nlk = data;
3306         struct netlink_compare_arg arg;
3307 
3308         netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
3309         return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
3310 }
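/*
 * The hash key is the (network namespace, portid) pair packed into
 * struct netlink_compare_arg; jhash2() mixes it as an array of u32
 * words.  Two sockets may therefore share a portid as long as they
 * live in different namespaces.
 */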
3311 
3312 static const struct rhashtable_params netlink_rhashtable_params = {
3313         .head_offset = offsetof(struct netlink_sock, node),
3314         .key_len = netlink_compare_arg_len,
3315         .obj_hashfn = netlink_hash,
3316         .obj_cmpfn = netlink_compare,
3317         .automatic_shrinking = true,
3318 };
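/*
 * The lookup side of these parameters lives earlier in this file:
 * __netlink_lookup() builds the same compare key and queries the
 * table, roughly as in this paraphrase:
 */
static struct netlink_sock *lookup_sketch(struct netlink_table *table,
                                          u32 portid, struct net *net)
{
        struct netlink_compare_arg arg;

        netlink_compare_arg_init(&arg, net, portid);
        return rhashtable_lookup_fast(&table->hash, &arg,
                                      netlink_rhashtable_params);
}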
3319 
3320 static int __init netlink_proto_init(void)
3321 {
3322         int i;
3323         int err = proto_register(&netlink_proto, 0);
3324 
3325         if (err != 0)
3326                 goto out;
3327 
3328         BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
3329 
3330         nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3331         if (!nl_table)
3332                 goto panic;
3333 
3334         for (i = 0; i < MAX_LINKS; i++) {
3335                 if (rhashtable_init(&nl_table[i].hash,
3336                                     &netlink_rhashtable_params) < 0) {
3337                         while (--i >= 0) /* include slot 0 in cleanup */
3338                                 rhashtable_destroy(&nl_table[i].hash);
3339                         kfree(nl_table);
3340                         goto panic;
3341                 }
3342         }
3343 
3344         INIT_LIST_HEAD(&netlink_tap_all);
3345 
3346         netlink_add_usersock_entry();
3347 
3348         sock_register(&netlink_family_ops);
3349         register_pernet_subsys(&netlink_net_ops);
3350         /* The netlink device handler may be needed early. */
3351         rtnetlink_init();
3352 out:
3353         return err;
3354 panic:
3355         panic("netlink_init: Cannot allocate nl_table\n");
3356 }
3357 
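/*
 * core_initcall() runs before subsys/device initcalls, so subsystems
 * that create kernel netlink sockets during their own initialization
 * find PF_NETLINK already registered.
 */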
3358 core_initcall(netlink_proto_init);
3359 
