/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <asm/cacheflush.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
        struct rcu_head         rcu;
        unsigned long           masks[0];
};

/* state bits */
#define NETLINK_CONGESTED       0x0

/* flags */
#define NETLINK_KERNEL_SOCKET   0x1
#define NETLINK_RECV_PKTINFO    0x2
#define NETLINK_BROADCAST_SEND_ERROR    0x4
#define NETLINK_RECV_NO_ENOBUFS 0x8
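
/* Added note (not in the original file): these internal flag bits mirror
 * netlink socket options from <linux/netlink.h>. A minimal userspace
 * sketch, assuming an ordinary rtnetlink socket:
 *
 *        int on = 1;
 *        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *        setsockopt(fd, SOL_NETLINK, NETLINK_PKTINFO, &on, sizeof(on));
 *        setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));
 *
 * The first call sets NETLINK_RECV_PKTINFO, the second sets
 * NETLINK_RECV_NO_ENOBUFS, which suppresses the ENOBUFS error raised by
 * netlink_overrun() below.
 */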

static inline int netlink_is_kernel(struct sock *sk)
{
        return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static inline u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}
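
/* Added example: group numbering is 1-based, so multicast group N maps to
 * bit N - 1; netlink_group_mask(3) yields 0x4, and group 0 ("no group")
 * yields an empty mask.
 */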

static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
{
        return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
}

int netlink_add_tap(struct netlink_tap *nt)
{
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
                return -EINVAL;

        spin_lock(&netlink_tap_lock);
        list_add_rcu(&nt->list, &netlink_tap_all);
        spin_unlock(&netlink_tap_lock);

        if (nt->module)
                __module_get(nt->module);

        return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

int __netlink_remove_tap(struct netlink_tap *nt)
{
        bool found = false;
        struct netlink_tap *tmp;

        spin_lock(&netlink_tap_lock);

        list_for_each_entry(tmp, &netlink_tap_all, list) {
                if (nt == tmp) {
                        list_del_rcu(&nt->list);
                        found = true;
                        goto out;
                }
        }

        pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
        spin_unlock(&netlink_tap_lock);

        if (found && nt->module)
                module_put(nt->module);

        return found ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(__netlink_remove_tap);

int netlink_remove_tap(struct netlink_tap *nt)
{
        int ret;

        ret = __netlink_remove_tap(nt);
        synchronize_net();

        return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
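
/* Usage sketch (illustrative, not part of the original file; modelled on
 * the in-tree nlmon monitoring driver). "my_dev" is a hypothetical
 * net_device whose type field is ARPHRD_NETLINK:
 *
 *        static struct netlink_tap my_tap;
 *
 *        my_tap.dev    = my_dev;
 *        my_tap.module = THIS_MODULE;
 *        err = netlink_add_tap(&my_tap);
 *        ...
 *        netlink_remove_tap(&my_tap);
 *
 * Since netlink_remove_tap() ends with synchronize_net(), the tap and its
 * device may be torn down safely once it returns.
 */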

static bool netlink_filter_tap(const struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        bool pass = false;

        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
         */
        switch (sk->sk_protocol) {
        case NETLINK_ROUTE:
        case NETLINK_USERSOCK:
        case NETLINK_SOCK_DIAG:
        case NETLINK_NFLOG:
        case NETLINK_XFRM:
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
                pass = true;
                break;
        }

        return pass;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct sk_buff *nskb;
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;

        dev_hold(dev);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (nskb) {
                nskb->dev = dev;
                nskb->protocol = htons((u16) sk->sk_protocol);

                ret = dev_queue_xmit(nskb);
                if (unlikely(ret > 0))
                        ret = net_xmit_errno(ret);
        }

        dev_put(dev);
        return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
        int ret;
        struct netlink_tap *tmp;

        if (!netlink_filter_tap(skb))
                return;

        list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
                ret = __netlink_deliver_tap_skb(skb, tmp->dev);
                if (unlikely(ret))
                        break;
        }
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
        rcu_read_lock();

        if (unlikely(!list_empty(&netlink_tap_all)))
                __netlink_deliver_tap(skb);

        rcu_read_unlock();
}

static void netlink_overrun(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
                if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                }
        }
        atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(NETLINK_CONGESTED, &nlk->state);
        if (!test_bit(NETLINK_CONGESTED, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
        return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++) {
                if (pg_vec[i] != NULL) {
                        if (is_vmalloc_addr(pg_vec[i]))
                                vfree(pg_vec[i]);
                        else
                                free_pages((unsigned long)pg_vec[i], order);
                }
        }
        kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
        void *buffer;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
                          __GFP_NOWARN | __GFP_NORETRY;

        buffer = (void *)__get_free_pages(gfp_flags, order);
        if (buffer != NULL)
                return buffer;

        buffer = vzalloc((1 << order) * PAGE_SIZE);
        if (buffer != NULL)
                return buffer;

        gfp_flags &= ~__GFP_NORETRY;
        return (void *)__get_free_pages(gfp_flags, order);
}
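
/* Added note: alloc_one_pg_vec_page() tries, in order, physically
 * contiguous pages without reclaim effort (__GFP_NORETRY), then vmalloc'ed
 * memory, and finally contiguous pages with retries enabled. free_pg_vec()
 * above mirrors this by using is_vmalloc_addr() to pick vfree() or
 * free_pages() per block.
 */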

static void **alloc_pg_vec(struct netlink_sock *nlk,
                           struct nl_mmap_req *req, unsigned int order)
{
        unsigned int block_nr = req->nm_block_nr;
        unsigned int i;
        void **pg_vec;

        pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
        if (pg_vec == NULL)
                return NULL;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i] = alloc_one_pg_vec_page(order);
                if (pg_vec[i] == NULL)
                        goto err1;
        }

        return pg_vec;
err1:
        free_pg_vec(pg_vec, order, block_nr);
        return NULL;
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                            bool closing, bool tx_ring)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct sk_buff_head *queue;
        void **pg_vec = NULL;
        unsigned int order = 0;
        int err;

        ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
        queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        if (!closing) {
                if (atomic_read(&nlk->mapped))
                        return -EBUSY;
                if (atomic_read(&ring->pending))
                        return -EBUSY;
        }

        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
                        return -EBUSY;

                if ((int)req->nm_block_size <= 0)
                        return -EINVAL;
                if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
                        return -EINVAL;
                if (req->nm_frame_size < NL_MMAP_HDRLEN)
                        return -EINVAL;
                if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
                        return -EINVAL;

                ring->frames_per_block = req->nm_block_size /
                                         req->nm_frame_size;
                if (ring->frames_per_block == 0)
                        return -EINVAL;
                if (ring->frames_per_block * req->nm_block_nr !=
                    req->nm_frame_nr)
                        return -EINVAL;

                order = get_order(req->nm_block_size);
                pg_vec = alloc_pg_vec(nlk, req, order);
                if (pg_vec == NULL)
                        return -ENOMEM;
        } else {
                if (req->nm_frame_nr)
                        return -EINVAL;
        }

        err = -EBUSY;
        mutex_lock(&nlk->pg_vec_lock);
        if (closing || atomic_read(&nlk->mapped) == 0) {
                err = 0;
                spin_lock_bh(&queue->lock);

                ring->frame_max         = req->nm_frame_nr - 1;
                ring->head              = 0;
                ring->frame_size        = req->nm_frame_size;
                ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;

                swap(ring->pg_vec_len, req->nm_block_nr);
                swap(ring->pg_vec_order, order);
                swap(ring->pg_vec, pg_vec);

                __skb_queue_purge(queue);
                spin_unlock_bh(&queue->lock);

                WARN_ON(atomic_read(&nlk->mapped));
        }
        mutex_unlock(&nlk->pg_vec_lock);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
        return err;
}
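
/* Userspace view (illustrative sketch of the memory mapped netlink API;
 * the sizes are examples only). A ring is configured with setsockopt() and
 * the rx and tx rings are then mapped as one contiguous area, rx first:
 *
 *        struct nl_mmap_req req = {
 *                .nm_block_size  = 4096,
 *                .nm_block_nr    = 64,
 *                .nm_frame_size  = 2048,
 *                .nm_frame_nr    = 64 * (4096 / 2048),
 *        };
 *
 *        setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *        setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
 *        ring = mmap(NULL, 2 * 64 * 4096, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, 0);
 *
 * netlink_set_ring() enforces the invariants checked above: the block size
 * is a page multiple, frames are NL_MMAP_MSG_ALIGNMENT aligned and
 * frames_per_block * nm_block_nr must equal nm_frame_nr.
 */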
421 
422 static void netlink_mm_open(struct vm_area_struct *vma)
423 {
424         struct file *file = vma->vm_file;
425         struct socket *sock = file->private_data;
426         struct sock *sk = sock->sk;
427 
428         if (sk)
429                 atomic_inc(&nlk_sk(sk)->mapped);
430 }
431 
432 static void netlink_mm_close(struct vm_area_struct *vma)
433 {
434         struct file *file = vma->vm_file;
435         struct socket *sock = file->private_data;
436         struct sock *sk = sock->sk;
437 
438         if (sk)
439                 atomic_dec(&nlk_sk(sk)->mapped);
440 }
441 
442 static const struct vm_operations_struct netlink_mmap_ops = {
443         .open   = netlink_mm_open,
444         .close  = netlink_mm_close,
445 };
446 
447 static int netlink_mmap(struct file *file, struct socket *sock,
448                         struct vm_area_struct *vma)
449 {
450         struct sock *sk = sock->sk;
451         struct netlink_sock *nlk = nlk_sk(sk);
452         struct netlink_ring *ring;
453         unsigned long start, size, expected;
454         unsigned int i;
455         int err = -EINVAL;
456 
457         if (vma->vm_pgoff)
458                 return -EINVAL;
459 
460         mutex_lock(&nlk->pg_vec_lock);
461 
462         expected = 0;
463         for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
464                 if (ring->pg_vec == NULL)
465                         continue;
466                 expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
467         }
468 
469         if (expected == 0)
470                 goto out;
471 
472         size = vma->vm_end - vma->vm_start;
473         if (size != expected)
474                 goto out;
475 
476         start = vma->vm_start;
477         for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
478                 if (ring->pg_vec == NULL)
479                         continue;
480 
481                 for (i = 0; i < ring->pg_vec_len; i++) {
482                         struct page *page;
483                         void *kaddr = ring->pg_vec[i];
484                         unsigned int pg_num;
485 
486                         for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
487                                 page = pgvec_to_page(kaddr);
488                                 err = vm_insert_page(vma, start, page);
489                                 if (err < 0)
490                                         goto out;
491                                 start += PAGE_SIZE;
492                                 kaddr += PAGE_SIZE;
493                         }
494                 }
495         }
496 
497         atomic_inc(&nlk->mapped);
498         vma->vm_ops = &netlink_mmap_ops;
499         err = 0;
500 out:
501         mutex_unlock(&nlk->pg_vec_lock);
502         return err;
503 }
504 
505 static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
506 {
507 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
508         struct page *p_start, *p_end;
509 
510         /* First page is flushed through netlink_{get,set}_status */
        p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
        p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
        while (p_start <= p_end) {
                flush_dcache_page(p_start);
                p_start++;
        }
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
        smp_rmb();
        flush_dcache_page(pgvec_to_page(hdr));
        return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
                               enum nl_mmap_status status)
{
        hdr->nm_status = status;
        flush_dcache_page(pgvec_to_page(hdr));
        smp_wmb();
}
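
/* Added note: nm_status is the ownership handshake between kernel and user
 * space. The smp_rmb() in netlink_get_status() and the smp_wmb() in
 * netlink_set_status() order accesses to the status word against accesses
 * to the frame payload, while flush_dcache_page() keeps the header page
 * coherent on architectures with aliasing data caches; userspace is
 * expected to apply equivalent barriers around its status transitions.
 */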

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
        unsigned int pg_vec_pos, frame_off;

        pg_vec_pos = pos / ring->frames_per_block;
        frame_off  = pos % ring->frames_per_block;

        return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
                     enum nl_mmap_status status)
{
        struct nl_mmap_hdr *hdr;

        hdr = __netlink_lookup_frame(ring, pos);
        if (netlink_get_status(hdr) != status)
                return NULL;

        return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
                      enum nl_mmap_status status)
{
        return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
                       enum nl_mmap_status status)
{
        unsigned int prev;

        prev = ring->head ? ring->head - 1 : ring->frame_max;
        return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
        ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
        unsigned int head = ring->head, pos = head;
        const struct nl_mmap_hdr *hdr;

        do {
                hdr = __netlink_lookup_frame(ring, pos);
                if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
                        break;
                if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
                        break;
                netlink_increment_head(ring);
        } while (ring->head != head);
}
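
/* Added note: NL_MMAP_STATUS_SKIP marks frames userspace wants the kernel
 * to pass over; netlink_forward_ring() advances head across any run of
 * such frames and stops at an unused frame or after one full lap.
 */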

static bool netlink_dump_space(struct netlink_sock *nlk)
{
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;
        unsigned int n;

        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                return false;

        n = ring->head + ring->frame_max / 2;
        if (n > ring->frame_max)
                n -= ring->frame_max;

        hdr = __netlink_lookup_frame(ring, n);

        return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int mask;
        int err;

        if (nlk->rx_ring.pg_vec != NULL) {
                /* Memory mapped sockets don't call recvmsg(), so flow control
                 * for dumps is performed here. A dump is allowed to continue
                 * if at least half the ring is unused.
                 */
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
                                sk->sk_err = err;
                                sk->sk_error_report(sk);
                                break;
                        }
                }
                netlink_rcv_wake(sk);
        }

        mask = datagram_poll(file, sock, wait);

        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (nlk->rx_ring.pg_vec) {
                netlink_forward_ring(&nlk->rx_ring);
                if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLIN | POLLRDNORM;
        }
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        spin_lock_bh(&sk->sk_write_queue.lock);
        if (nlk->tx_ring.pg_vec) {
                if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLOUT | POLLWRNORM;
        }
        spin_unlock_bh(&sk->sk_write_queue.lock);

        return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
        return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
                                   struct netlink_ring *ring,
                                   struct nl_mmap_hdr *hdr)
{
        unsigned int size;
        void *data;

        size = ring->frame_size - NL_MMAP_HDRLEN;
        data = (void *)hdr + NL_MMAP_HDRLEN;

        skb->head       = data;
        skb->data       = data;
        skb_reset_tail_pointer(skb);
        skb->end        = skb->tail + size;
        skb->len        = 0;

        skb->destructor = netlink_skb_destructor;
        NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
        NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                u32 dst_portid, u32 dst_group,
                                struct sock_iocb *siocb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
        struct sk_buff *skb;
        unsigned int maxlen;
        bool excl = true;
        int err = 0, len = 0;

        /* Netlink messages are validated by the receiver before processing.
         * In order to avoid userspace changing the contents of the message
         * after validation, the socket and the ring may only be used by a
         * single process, otherwise we fall back to copying.
         */
        if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
            atomic_read(&nlk->mapped) > 1)
                excl = false;

        mutex_lock(&nlk->pg_vec_lock);

        ring   = &nlk->tx_ring;
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;

        do {
                hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
                if (hdr == NULL) {
                        if (!(msg->msg_flags & MSG_DONTWAIT) &&
                            atomic_read(&nlk->tx_ring.pending))
                                schedule();
                        continue;
                }
                if (hdr->nm_len > maxlen) {
                        err = -EINVAL;
                        goto out;
                }

                netlink_frame_flush_dcache(hdr);

                if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
                        skb = alloc_skb_head(GFP_KERNEL);
                        if (skb == NULL) {
                                err = -ENOBUFS;
                                goto out;
                        }
                        sock_hold(sk);
                        netlink_ring_setup_skb(skb, sk, ring, hdr);
                        NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
                        __skb_put(skb, hdr->nm_len);
                        netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
                        atomic_inc(&ring->pending);
                } else {
                        skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
                        if (skb == NULL) {
                                err = -ENOBUFS;
                                goto out;
                        }
                        __skb_put(skb, hdr->nm_len);
                        memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                }

                netlink_increment_head(ring);

                NETLINK_CB(skb).portid    = nlk->portid;
                NETLINK_CB(skb).dst_group = dst_group;
                NETLINK_CB(skb).creds     = siocb->scm->creds;

                err = security_netlink_send(sk, skb);
                if (err) {
                        kfree_skb(skb);
                        goto out;
                }

                if (unlikely(dst_group)) {
                        atomic_inc(&skb->users);
                        netlink_broadcast(sk, skb, dst_portid, dst_group,
                                          GFP_KERNEL);
                }
                err = netlink_unicast(sk, skb, dst_portid,
                                      msg->msg_flags & MSG_DONTWAIT);
                if (err < 0)
                        goto out;
                len += err;

        } while (hdr != NULL ||
                 (!(msg->msg_flags & MSG_DONTWAIT) &&
                  atomic_read(&nlk->tx_ring.pending)));

        if (len > 0)
                err = len;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}
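
/* Userspace transmit path (illustrative sketch; tx_ring_frame() is a
 * hypothetical helper that returns the frame header at a ring position).
 * The sender fills a frame, marks it valid and kicks the kernel with a
 * plain sendto():
 *
 *        struct nl_mmap_hdr *hdr = tx_ring_frame(ring, pos);
 *
 *        memcpy((void *)hdr + NL_MMAP_HDRLEN, nlh, nlh->nlmsg_len);
 *        hdr->nm_len = nlh->nlmsg_len;
 *        hdr->nm_status = NL_MMAP_STATUS_VALID;
 *        sendto(fd, NULL, 0, 0, NULL, 0);
 *
 * netlink_mmap_sendmsg() above then consumes the valid frames, returning
 * each to NL_MMAP_STATUS_UNUSED (copied case) or NL_MMAP_STATUS_RESERVED
 * (zero-copy case).
 */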

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
        struct nl_mmap_hdr *hdr;

        hdr = netlink_mmap_hdr(skb);
        hdr->nm_len     = skb->len;
        hdr->nm_group   = NETLINK_CB(skb).dst_group;
        hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_frame_flush_dcache(hdr);
        netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

        NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
        kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL) {
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                kfree_skb(skb);
                netlink_overrun(sk);
                return;
        }
        netlink_increment_head(ring);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        hdr->nm_len     = skb->len;
        hdr->nm_group   = NETLINK_CB(skb).dst_group;
        hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)      false
#define netlink_rx_is_mmaped(sk)        false
#define netlink_tx_is_mmaped(sk)        false
#define netlink_mmap                    sock_no_mmap
#define netlink_poll                    datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)     0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
        struct nl_mmap_hdr *hdr;
        struct netlink_ring *ring;
        struct sock *sk;

        /* If a packet from the kernel to userspace was freed because of an
         * error without being delivered to userspace, the kernel must reset
         * the status. In the direction userspace to kernel, the status is
         * always reset here after the packet was processed and freed.
         */
        if (netlink_skb_is_mmaped(skb)) {
                hdr = netlink_mmap_hdr(skb);
                sk = NETLINK_CB(skb).sk;

                if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                        ring = &nlk_sk(sk)->tx_ring;
                } else {
                        if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
                                hdr->nm_len = 0;
                                netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
                        }
                        ring = &nlk_sk(sk)->rx_ring;
                }

                WARN_ON(atomic_read(&ring->pending) == 0);
                atomic_dec(&ring->pending);
                sock_put(sk);

                skb->head = NULL;
        }
#endif
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
                        vfree(skb->head);

                skb->head = NULL;
        }
        if (skb->sk != NULL)
                sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        WARN_ON(skb->sk != NULL);
        skb->sk = sk;
        skb->destructor = netlink_skb_destructor;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->cb_running) {
                if (nlk->cb.done)
                        nlk->cb.done(&nlk->cb);

                module_put(nlk->cb.module);
                kfree_skb(nlk->cb.skb);
        }

        skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
        if (1) {
                struct nl_mmap_req req;

                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, false);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, true);
        }
#endif /* CONFIG_NETLINK_MMAP */

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
        __acquires(nl_table_lock)
{
        might_sleep();

        write_lock_irq(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_irq(&nl_table_lock);
                        schedule();
                        write_lock_irq(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

void netlink_table_ungrab(void)
        __releases(nl_table_lock)
{
        write_unlock_irq(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}
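
/* Added summary note: lookups use the cheap netlink_lock_table() /
 * netlink_unlock_table() pair, which only bumps nl_table_users under a
 * brief read_lock. Mutators call netlink_table_grab(), which holds the
 * write lock and sleeps until all such readers have drained, so hash table
 * changes never run concurrently with a lookup.
 */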

static bool netlink_compare(struct net *net, struct sock *sk)
{
        return net_eq(sock_net(sk), net);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
        struct netlink_table *table = &nl_table[protocol];
        struct nl_portid_hash *hash = &table->hash;
        struct hlist_head *head;
        struct sock *sk;

        read_lock(&nl_table_lock);
        head = nl_portid_hashfn(hash, portid);
        sk_for_each(sk, head) {
                if (table->compare(net, sk) &&
                    (nlk_sk(sk)->portid == portid)) {
                        sock_hold(sk);
                        goto found;
                }
        }
        sk = NULL;
found:
        read_unlock(&nl_table_lock);
        return sk;
}

static struct hlist_head *nl_portid_hash_zalloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_ATOMIC);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
                                         get_order(size));
}

static void nl_portid_hash_free(struct hlist_head *table, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(table);
        else
                free_pages((unsigned long)table, get_order(size));
}

static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
{
        unsigned int omask, mask, shift;
        size_t osize, size;
        struct hlist_head *otable, *table;
        int i;

        omask = mask = hash->mask;
        osize = size = (mask + 1) * sizeof(*table);
        shift = hash->shift;

        if (grow) {
                if (++shift > hash->max_shift)
                        return 0;
                mask = mask * 2 + 1;
                size *= 2;
        }

        table = nl_portid_hash_zalloc(size);
        if (!table)
                return 0;

        otable = hash->table;
        hash->table = table;
        hash->mask = mask;
        hash->shift = shift;
        get_random_bytes(&hash->rnd, sizeof(hash->rnd));

        for (i = 0; i <= omask; i++) {
                struct sock *sk;
                struct hlist_node *tmp;

                sk_for_each_safe(sk, tmp, &otable[i])
                        __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
        }

        nl_portid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
}

static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
{
        int avg = hash->entries >> hash->shift;

        if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
                return 1;

        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
                nl_portid_hash_rehash(hash, 0);
                return 1;
        }

        return 0;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
        unsigned long mask;
        unsigned int i;
        struct listeners *listeners;

        listeners = nl_deref_protected(tbl->listeners);
        if (!listeners)
                return;

        for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
                mask = 0;
                sk_for_each_bound(sk, &tbl->mc_list) {
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
                listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed", which
         * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        struct nl_portid_hash *hash = &table->hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
        int len;

        netlink_table_grab();
        head = nl_portid_hashfn(hash, portid);
        len = 0;
        sk_for_each(osk, head) {
                if (table->compare(net, osk) &&
                    (nlk_sk(osk)->portid == portid))
                        break;
                len++;
        }
        if (osk)
                goto err;

        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;

        if (len && nl_portid_hash_dilute(hash, len))
                head = nl_portid_hashfn(hash, portid);
        hash->entries++;
        nlk_sk(sk)->portid = portid;
        sk_add_node(sk, head);
        err = 0;

err:
        netlink_table_ungrab();
        return err;
}

static void netlink_remove(struct sock *sk)
{
        netlink_table_grab();
        if (sk_del_node_init(sk))
                nl_table[sk->sk_protocol].hash.entries--;
        if (nlk_sk(sk)->subscriptions)
                __sk_del_bind_node(sk);
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
                            struct mutex *cb_mutex, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->ops = &netlink_ops;

        sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        if (cb_mutex) {
                nlk->cb_mutex = cb_mutex;
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
        }
        init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
        mutex_init(&nlk->pg_vec_lock);
#endif

        sk->sk_destruct = netlink_sock_destruct;
        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
                          int kern)
{
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
        void (*bind)(int group);
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
#ifdef CONFIG_MODULES
        if (!nl_table[protocol].registered) {
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
#endif
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
        else
                err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        bind = nl_table[protocol].bind;
        netlink_unlock_table();

        if (err < 0)
                goto out;

        err = __netlink_create(net, sock, cb_mutex, protocol);
        if (err < 0)
                goto out_module;

        local_bh_disable();
        sock_prot_inuse_add(net, &netlink_proto, 1);
        local_bh_enable();

        nlk = nlk_sk(sock->sk);
        nlk->module = module;
        nlk->netlink_bind = bind;
out:
        return err;

out_module:
        module_put(module);
        goto out;
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        sock_orphan(sk);
        nlk = nlk_sk(sk);

        /*
         * OK. Socket is unlinked, any packets that arrive now
         * will be purged.
         */

        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->portid) {
                struct netlink_notify n = {
                                                .net = sock_net(sk),
                                                .protocol = sk->sk_protocol,
                                                .portid = nlk->portid,
                                          };
                atomic_notifier_call_chain(&netlink_chain,
                                NETLINK_URELEASE, &n);
        }

        module_put(nlk->module);

        netlink_table_grab();
        if (netlink_is_kernel(sk)) {
                BUG_ON(nl_table[sk->sk_protocol].registered == 0);
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        struct listeners *old;

                        old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
                        RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
                        kfree_rcu(old, rcu);
                        nl_table[sk->sk_protocol].module = NULL;
                        nl_table[sk->sk_protocol].bind = NULL;
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
        } else if (nlk->subscriptions) {
                netlink_update_listeners(sk);
        }
        netlink_table_ungrab();

        kfree(nlk->groups);
        nlk->groups = NULL;

        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
        sock_put(sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        struct nl_portid_hash *hash = &table->hash;
        struct hlist_head *head;
        struct sock *osk;
        s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        netlink_table_grab();
        head = nl_portid_hashfn(hash, portid);
        sk_for_each(osk, head) {
                if (!table->compare(net, osk))
                        continue;
                if (nlk_sk(osk)->portid == portid) {
                        /* Bind collision, search negative portid values. */
                        portid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
                        goto retry;
                }
        }
        netlink_table_ungrab();

        err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine.  */
        if (err == -EBUSY)
                err = 0;

        return err;
}
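
/* Added note: autobind first tries the caller's thread group id as the
 * portid and, on collision, probes downward from -4097; interpreted as
 * u32 these values sit at the top of the portid space, clear of
 * pid-derived bindings and of portid 0 used by kernel sockets.
 */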

static inline int netlink_capable(const struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].flags & flag) ||
                ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->subscriptions && !subscriptions)
                __sk_del_bind_node(sk);
        else if (!nlk->subscriptions && subscriptions)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int groups;
        unsigned long *new_groups;
        int err = 0;

        netlink_table_grab();

        groups = nl_table[sk->sk_protocol].groups;
        if (!nl_table[sk->sk_protocol].registered) {
                err = -ENOENT;
                goto out_unlock;
        }

        if (nlk->ngroups >= groups)
                goto out_unlock;

        new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
        if (new_groups == NULL) {
                err = -ENOMEM;
                goto out_unlock;
        }
        memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
               NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

        nlk->groups = new_groups;
        nlk->ngroups = groups;
 out_unlock:
        netlink_table_ungrab();
        return err;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        int addr_len)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err;

        if (addr_len < sizeof(struct sockaddr_nl))
                return -EINVAL;

        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;

        /* Only superuser is allowed to listen to multicasts */
        if (nladdr->nl_groups) {
                if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
                        return err;
        }

        if (nlk->portid) {
                if (nladdr->nl_pid != nlk->portid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, net, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err)
                        return err;
        }

        if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
                return 0;

        netlink_table_grab();
        netlink_update_subscriptions(sk, nlk->subscriptions +
                                         hweight32(nladdr->nl_groups) -
                                         hweight32(nlk->groups[0]));
        nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
        netlink_update_listeners(sk);
        netlink_table_ungrab();

        if (nlk->netlink_bind && nlk->groups[0]) {
                int i;

                for (i = 0; i < nlk->ngroups; i++) {
                        if (test_bit(i, nlk->groups))
                                nlk->netlink_bind(i);
                }
        }

        return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                           int alen, int flags)
{
        int err = 0;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (alen < sizeof(addr->sa_family))
                return -EINVAL;

        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state    = NETLINK_UNCONNECTED;
                nlk->dst_portid = 0;
                nlk->dst_group  = 0;
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;

        /* Only superuser is allowed to send multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;

        if (!nlk->portid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->sk_state    = NETLINK_CONNECTED;
                nlk->dst_portid = nladdr->nl_pid;
                nlk->dst_group  = ffs(nladdr->nl_groups);
        }

        return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
                           int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = nlk->dst_portid;
                nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
        } else {
                nladdr->nl_pid = nlk->portid;
                nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
        }
        return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
        struct sock *sock;
        struct netlink_sock *nlk;

        sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);

        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if (sock->sk_state == NETLINK_CONNECTED &&
            nlk->dst_portid != nlk_sk(ssk)->portid) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
        return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
        struct inode *inode = file_inode(filp);
        struct sock *sock;

        if (!S_ISSOCK(inode->i_mode))
                return ERR_PTR(-ENOTSOCK);

        sock = SOCKET_I(inode)->sk;
        if (sock->sk_family != AF_NETLINK)
                return ERR_PTR(-EINVAL);

        sock_hold(sock);
        return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
                                               int broadcast)
{
        struct sk_buff *skb;
        void *data;

        if (size <= NLMSG_GOODSIZE || broadcast)
                return alloc_skb(size, GFP_KERNEL);

        size = SKB_DATA_ALIGN(size) +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        data = vmalloc(size);
        if (data == NULL)
                return NULL;

        skb = build_skb(data, size);
        if (skb == NULL)
                vfree(data);
        else {
                skb->head_frag = 0;
                skb->destructor = netlink_skb_destructor;
        }

        return skb;
}
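
/* Added note: unicast messages larger than NLMSG_GOODSIZE get a vmalloc'ed
 * data area so large replies do not depend on high-order page allocations;
 * netlink_skb_destructor() recognizes such heads via is_vmalloc_addr() and
 * releases them with vfree(). Broadcast skbs keep the plain alloc_skb()
 * path.
 */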

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
1588  * Return values:
1589  * < 0: error. skb freed, reference to sock dropped.
1590  * 0: continue
1591  * 1: repeat lookup - reference dropped while waiting for socket memory.
1592  */
1593 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1594                       long *timeo, struct sock *ssk)
1595 {
1596         struct netlink_sock *nlk;
1597 
1598         nlk = nlk_sk(sk);
1599 
1600         if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1601              test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1602             !netlink_skb_is_mmaped(skb)) {
1603                 DECLARE_WAITQUEUE(wait, current);
1604                 if (!*timeo) {
1605                         if (!ssk || netlink_is_kernel(ssk))
1606                                 netlink_overrun(sk);
1607                         sock_put(sk);
1608                         kfree_skb(skb);
1609                         return -EAGAIN;
1610                 }
1611 
1612                 __set_current_state(TASK_INTERRUPTIBLE);
1613                 add_wait_queue(&nlk->wait, &wait);
1614 
1615                 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1616                      test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1617                     !sock_flag(sk, SOCK_DEAD))
1618                         *timeo = schedule_timeout(*timeo);
1619 
1620                 __set_current_state(TASK_RUNNING);
1621                 remove_wait_queue(&nlk->wait, &wait);
1622                 sock_put(sk);
1623 
1624                 if (signal_pending(current)) {
1625                         kfree_skb(skb);
1626                         return sock_intr_errno(*timeo);
1627                 }
1628                 return 1;
1629         }
1630         netlink_skb_set_owner_r(skb, sk);
1631         return 0;
1632 }
1633 
1634 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1635 {
1636         int len = skb->len;
1637 
1638         netlink_deliver_tap(skb);
1639 
1640 #ifdef CONFIG_NETLINK_MMAP
1641         if (netlink_skb_is_mmaped(skb))
1642                 netlink_queue_mmaped_skb(sk, skb);
1643         else if (netlink_rx_is_mmaped(sk))
1644                 netlink_ring_set_copied(sk, skb);
1645         else
1646 #endif /* CONFIG_NETLINK_MMAP */
1647                 skb_queue_tail(&sk->sk_receive_queue, skb);
1648         sk->sk_data_ready(sk, len);
1649         return len;
1650 }
1651 
1652 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1653 {
1654         int len = __netlink_sendskb(sk, skb);
1655 
1656         sock_put(sk);
1657         return len;
1658 }
1659 
1660 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1661 {
1662         kfree_skb(skb);
1663         sock_put(sk);
1664 }
1665 
1666 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1667 {
1668         int delta;
1669 
1670         WARN_ON(skb->sk != NULL);
1671         if (netlink_skb_is_mmaped(skb))
1672                 return skb;
1673 
1674         delta = skb->end - skb->tail;
1675         if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1676                 return skb;
1677 
1678         if (skb_shared(skb)) {
1679                 struct sk_buff *nskb = skb_clone(skb, allocation);
1680                 if (!nskb)
1681                         return skb;
1682                 consume_skb(skb);
1683                 skb = nskb;
1684         }
1685 
1686         if (!pskb_expand_head(skb, 0, -delta, allocation))
1687                 skb->truesize -= delta;
1688 
1689         return skb;
1690 }
1691 
1692 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1693                                   struct sock *ssk)
1694 {
1695         int ret;
1696         struct netlink_sock *nlk = nlk_sk(sk);
1697 
1698         ret = -ECONNREFUSED;
1699         if (nlk->netlink_rcv != NULL) {
1700                 /* We could do a netlink_deliver_tap(skb) here as well
1701                  * but since this is intended for the kernel only, we
1702                  * should rather let it stay under the hood.
1703                  */
1704 
1705                 ret = skb->len;
1706                 netlink_skb_set_owner_r(skb, sk);
1707                 NETLINK_CB(skb).sk = ssk;
1708                 nlk->netlink_rcv(skb);
1709                 consume_skb(skb);
1710         } else {
1711                 kfree_skb(skb);
1712         }
1713         sock_put(sk);
1714         return ret;
1715 }
1716 
1717 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1718                     u32 portid, int nonblock)
1719 {
1720         struct sock *sk;
1721         int err;
1722         long timeo;
1723 
1724         skb = netlink_trim(skb, gfp_any());
1725 
1726         timeo = sock_sndtimeo(ssk, nonblock);
1727 retry:
1728         sk = netlink_getsockbyportid(ssk, portid);
1729         if (IS_ERR(sk)) {
1730                 kfree_skb(skb);
1731                 return PTR_ERR(sk);
1732         }
1733         if (netlink_is_kernel(sk))
1734                 return netlink_unicast_kernel(sk, skb, ssk);
1735 
1736         if (sk_filter(sk, skb)) {
1737                 err = skb->len;
1738                 kfree_skb(skb);
1739                 sock_put(sk);
1740                 return err;
1741         }
1742 
1743         err = netlink_attachskb(sk, skb, &timeo, ssk);
1744         if (err == 1)
1745                 goto retry;
1746         if (err)
1747                 return err;
1748 
1749         return netlink_sendskb(sk, skb);
1750 }
1751 EXPORT_SYMBOL(netlink_unicast);
1752 
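/*
 * In-kernel usage sketch (hypothetical names; the socket is assumed to come
 * from netlink_kernel_create()): the common unicast pattern is to reply to
 * the sender's portid recorded in NETLINK_CB(skb).
 */
static struct sock *example_nl_sk;      /* assumed kernel netlink socket */

static void example_nl_input(struct sk_buff *skb)
{
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
        u32 portid = NETLINK_CB(skb).portid;
        struct sk_buff *reply;
        struct nlmsghdr *rep;

        reply = nlmsg_new(nlmsg_len(nlh), GFP_KERNEL);
        if (!reply)
                return;
        rep = nlmsg_put(reply, 0, nlh->nlmsg_seq, nlh->nlmsg_type,
                        nlmsg_len(nlh), 0);
        if (!rep) {
                kfree_skb(reply);
                return;
        }
        memcpy(nlmsg_data(rep), nlmsg_data(nlh), nlmsg_len(nlh));
        /* consumes 'reply'; looks up the destination socket by portid */
        netlink_unicast(example_nl_sk, reply, portid, MSG_DONTWAIT);
}
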
1753 struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1754                                   u32 dst_portid, gfp_t gfp_mask)
1755 {
1756 #ifdef CONFIG_NETLINK_MMAP
1757         struct sock *sk = NULL;
1758         struct sk_buff *skb;
1759         struct netlink_ring *ring;
1760         struct nl_mmap_hdr *hdr;
1761         unsigned int maxlen;
1762 
1763         sk = netlink_getsockbyportid(ssk, dst_portid);
1764         if (IS_ERR(sk))
1765                 goto out;
1766 
1767         ring = &nlk_sk(sk)->rx_ring;
1768         /* fast-path without atomic ops for common case: non-mmaped receiver */
1769         if (ring->pg_vec == NULL)
1770                 goto out_put;
1771 
1772         skb = alloc_skb_head(gfp_mask);
1773         if (skb == NULL)
1774                 goto err1;
1775 
1776         spin_lock_bh(&sk->sk_receive_queue.lock);
1777         /* check again under lock */
1778         if (ring->pg_vec == NULL)
1779                 goto out_free;
1780 
1781         maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1782         if (maxlen < size)
1783                 goto out_free;
1784 
1785         netlink_forward_ring(ring);
1786         hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1787         if (hdr == NULL)
1788                 goto err2;
1789         netlink_ring_setup_skb(skb, sk, ring, hdr);
1790         netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1791         atomic_inc(&ring->pending);
1792         netlink_increment_head(ring);
1793 
1794         spin_unlock_bh(&sk->sk_receive_queue.lock);
1795         return skb;
1796 
1797 err2:
1798         kfree_skb(skb);
1799         spin_unlock_bh(&sk->sk_receive_queue.lock);
1800         netlink_overrun(sk);
1801 err1:
1802         sock_put(sk);
1803         return NULL;
1804 
1805 out_free:
1806         kfree_skb(skb);
1807         spin_unlock_bh(&sk->sk_receive_queue.lock);
1808 out_put:
1809         sock_put(sk);
1810 out:
1811 #endif
1812         return alloc_skb(size, gfp_mask);
1813 }
1814 EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1815 
1816 int netlink_has_listeners(struct sock *sk, unsigned int group)
1817 {
1818         int res = 0;
1819         struct listeners *listeners;
1820 
1821         BUG_ON(!netlink_is_kernel(sk));
1822 
1823         rcu_read_lock();
1824         listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1825 
1826         if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1827                 res = test_bit(group - 1, listeners->masks);
1828 
1829         rcu_read_unlock();
1830 
1831         return res;
1832 }
1833 EXPORT_SYMBOL_GPL(netlink_has_listeners);
1834 
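/*
 * Usage sketch (hypothetical caller): the point of netlink_has_listeners()
 * is to let event producers skip building an expensive notification when
 * nobody has joined the multicast group.
 */
static void example_notify(struct sock *kernel_sk, unsigned int group)
{
        if (!netlink_has_listeners(kernel_sk, group))
                return;         /* no subscribers, skip the allocation */
        /* ... build the skb and netlink_broadcast() it ... */
}
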
1835 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1836 {
1837         struct netlink_sock *nlk = nlk_sk(sk);
1838 
1839         if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1840             !test_bit(NETLINK_CONGESTED, &nlk->state)) {
1841                 netlink_skb_set_owner_r(skb, sk);
1842                 __netlink_sendskb(sk, skb);
1843                 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1844         }
1845         return -1;
1846 }
1847 
1848 struct netlink_broadcast_data {
1849         struct sock *exclude_sk;
1850         struct net *net;
1851         u32 portid;
1852         u32 group;
1853         int failure;
1854         int delivery_failure;
1855         int congested;
1856         int delivered;
1857         gfp_t allocation;
1858         struct sk_buff *skb, *skb2;
1859         int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1860         void *tx_data;
1861 };
1862 
1863 static int do_one_broadcast(struct sock *sk,
1864                                    struct netlink_broadcast_data *p)
1865 {
1866         struct netlink_sock *nlk = nlk_sk(sk);
1867         int val;
1868 
1869         if (p->exclude_sk == sk)
1870                 goto out;
1871 
1872         if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1873             !test_bit(p->group - 1, nlk->groups))
1874                 goto out;
1875 
1876         if (!net_eq(sock_net(sk), p->net))
1877                 goto out;
1878 
1879         if (p->failure) {
1880                 netlink_overrun(sk);
1881                 goto out;
1882         }
1883 
1884         sock_hold(sk);
1885         if (p->skb2 == NULL) {
1886                 if (skb_shared(p->skb)) {
1887                         p->skb2 = skb_clone(p->skb, p->allocation);
1888                 } else {
1889                         p->skb2 = skb_get(p->skb);
1890                         /*
1891                          * skb ownership may have been set when
1892                          * delivered to a previous socket.
1893                          */
1894                         skb_orphan(p->skb2);
1895                 }
1896         }
1897         if (p->skb2 == NULL) {
1898                 netlink_overrun(sk);
1899                 /* Clone failed. Notify ALL listeners. */
1900                 p->failure = 1;
1901                 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1902                         p->delivery_failure = 1;
1903         } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1904                 kfree_skb(p->skb2);
1905                 p->skb2 = NULL;
1906         } else if (sk_filter(sk, p->skb2)) {
1907                 kfree_skb(p->skb2);
1908                 p->skb2 = NULL;
1909         } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1910                 netlink_overrun(sk);
1911                 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1912                         p->delivery_failure = 1;
1913         } else {
1914                 p->congested |= val;
1915                 p->delivered = 1;
1916                 p->skb2 = NULL;
1917         }
1918         sock_put(sk);
1919 
1920 out:
1921         return 0;
1922 }
1923 
1924 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1925         u32 group, gfp_t allocation,
1926         int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1927         void *filter_data)
1928 {
1929         struct net *net = sock_net(ssk);
1930         struct netlink_broadcast_data info;
1931         struct sock *sk;
1932 
1933         skb = netlink_trim(skb, allocation);
1934 
1935         info.exclude_sk = ssk;
1936         info.net = net;
1937         info.portid = portid;
1938         info.group = group;
1939         info.failure = 0;
1940         info.delivery_failure = 0;
1941         info.congested = 0;
1942         info.delivered = 0;
1943         info.allocation = allocation;
1944         info.skb = skb;
1945         info.skb2 = NULL;
1946         info.tx_filter = filter;
1947         info.tx_data = filter_data;
1948 
1949         /* While we sleep in clone, do not allow to change socket list */
1950 
1951         netlink_lock_table();
1952 
1953         sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1954                 do_one_broadcast(sk, &info);
1955 
1956         consume_skb(skb);
1957 
1958         netlink_unlock_table();
1959 
1960         if (info.delivery_failure) {
1961                 kfree_skb(info.skb2);
1962                 return -ENOBUFS;
1963         }
1964         consume_skb(info.skb2);
1965 
1966         if (info.delivered) {
1967                 if (info.congested && (allocation & __GFP_WAIT))
1968                         yield();
1969                 return 0;
1970         }
1971         return -ESRCH;
1972 }
1973 EXPORT_SYMBOL(netlink_broadcast_filtered);
1974 
1975 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1976                       u32 group, gfp_t allocation)
1977 {
1978         return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1979                 NULL, NULL);
1980 }
1981 EXPORT_SYMBOL(netlink_broadcast);
1982 
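/*
 * Usage sketch (illustrative group number and payload): broadcasting an
 * event. The return-value convention matters: -ESRCH ("no listeners") is
 * usually harmless for event producers, while -ENOBUFS signals a real
 * delivery failure.
 */
static int example_broadcast_event(struct sock *kernel_sk, const char *text)
{
        size_t len = strlen(text) + 1;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        int err;

        skb = nlmsg_new(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        /* cannot fail: the skb was sized for exactly this message */
        nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, len, 0);
        memcpy(nlmsg_data(nlh), text, len);
        /* portid 0 = exclude nobody; group 1 is an assumed group */
        err = netlink_broadcast(kernel_sk, skb, 0, 1, GFP_KERNEL);
        return err == -ESRCH ? 0 : err;
}
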
1983 struct netlink_set_err_data {
1984         struct sock *exclude_sk;
1985         u32 portid;
1986         u32 group;
1987         int code;
1988 };
1989 
1990 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1991 {
1992         struct netlink_sock *nlk = nlk_sk(sk);
1993         int ret = 0;
1994 
1995         if (sk == p->exclude_sk)
1996                 goto out;
1997 
1998         if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1999                 goto out;
2000 
2001         if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
2002             !test_bit(p->group - 1, nlk->groups))
2003                 goto out;
2004 
2005         if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2006                 ret = 1;
2007                 goto out;
2008         }
2009 
2010         sk->sk_err = p->code;
2011         sk->sk_error_report(sk);
2012 out:
2013         return ret;
2014 }
2015 
2016 /**
2017  * netlink_set_err - report error to broadcast listeners
2018  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
2019  * @portid: the PORTID of a process that we want to skip (if any)
2020  * @group: the broadcast group that will notice the error
2021  * @code: error code, must be negative (as usual in kernelspace)
2022  *
2023  * This function returns the number of broadcast listeners that have set the
2024  * NETLINK_RECV_NO_ENOBUFS socket option.
2025  */
2026 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
2027 {
2028         struct netlink_set_err_data info;
2029         struct sock *sk;
2030         int ret = 0;
2031 
2032         info.exclude_sk = ssk;
2033         info.portid = portid;
2034         info.group = group;
2035         /* sk->sk_err wants a positive error value */
2036         info.code = -code;
2037 
2038         read_lock(&nl_table_lock);
2039 
2040         sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2041                 ret += do_one_set_err(sk, &info);
2042 
2043         read_unlock(&nl_table_lock);
2044         return ret;
2045 }
2046 EXPORT_SYMBOL(netlink_set_err);
2047 
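/*
 * Usage sketch (illustrative): after a failed broadcast, a producer can
 * report the overrun to the group's remaining listeners. Note the sign
 * convention documented above: the code passed in must be negative.
 */
static void example_report_overrun(struct sock *kernel_sk, unsigned int group)
{
        /* every listener of 'group' sees sk_err == ENOBUFS, except those
         * that opted out via NETLINK_NO_ENOBUFS (they are only counted) */
        netlink_set_err(kernel_sk, 0, group, -ENOBUFS);
}
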
2048 /* must be called with netlink table grabbed */
2049 static void netlink_update_socket_mc(struct netlink_sock *nlk,
2050                                      unsigned int group,
2051                                      int is_new)
2052 {
2053         int old, new = !!is_new, subscriptions;
2054 
2055         old = test_bit(group - 1, nlk->groups);
2056         subscriptions = nlk->subscriptions - old + new;
2057         if (new)
2058                 __set_bit(group - 1, nlk->groups);
2059         else
2060                 __clear_bit(group - 1, nlk->groups);
2061         netlink_update_subscriptions(&nlk->sk, subscriptions);
2062         netlink_update_listeners(&nlk->sk);
2063 }
2064 
2065 static int netlink_setsockopt(struct socket *sock, int level, int optname,
2066                               char __user *optval, unsigned int optlen)
2067 {
2068         struct sock *sk = sock->sk;
2069         struct netlink_sock *nlk = nlk_sk(sk);
2070         unsigned int val = 0;
2071         int err;
2072 
2073         if (level != SOL_NETLINK)
2074                 return -ENOPROTOOPT;
2075 
2076         if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2077             optlen >= sizeof(int) &&
2078             get_user(val, (unsigned int __user *)optval))
2079                 return -EFAULT;
2080 
2081         switch (optname) {
2082         case NETLINK_PKTINFO:
2083                 if (val)
2084                         nlk->flags |= NETLINK_RECV_PKTINFO;
2085                 else
2086                         nlk->flags &= ~NETLINK_RECV_PKTINFO;
2087                 err = 0;
2088                 break;
2089         case NETLINK_ADD_MEMBERSHIP:
2090         case NETLINK_DROP_MEMBERSHIP: {
2091                 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
2092                         return -EPERM;
2093                 err = netlink_realloc_groups(sk);
2094                 if (err)
2095                         return err;
2096                 if (!val || val - 1 >= nlk->ngroups)
2097                         return -EINVAL;
2098                 netlink_table_grab();
2099                 netlink_update_socket_mc(nlk, val,
2100                                          optname == NETLINK_ADD_MEMBERSHIP);
2101                 netlink_table_ungrab();
2102 
2103                 if (nlk->netlink_bind)
2104                         nlk->netlink_bind(val);
2105 
2106                 err = 0;
2107                 break;
2108         }
2109         case NETLINK_BROADCAST_ERROR:
2110                 if (val)
2111                         nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2112                 else
2113                         nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2114                 err = 0;
2115                 break;
2116         case NETLINK_NO_ENOBUFS:
2117                 if (val) {
2118                         nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
2119                         clear_bit(NETLINK_CONGESTED, &nlk->state);
2120                         wake_up_interruptible(&nlk->wait);
2121                 } else {
2122                         nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
2123                 }
2124                 err = 0;
2125                 break;
2126 #ifdef CONFIG_NETLINK_MMAP
2127         case NETLINK_RX_RING:
2128         case NETLINK_TX_RING: {
2129                 struct nl_mmap_req req;
2130 
2131                 /* Rings might consume more memory than queue limits, so
2132                  * require CAP_NET_ADMIN.
2133                  */
2134                 if (!capable(CAP_NET_ADMIN))
2135                         return -EPERM;
2136                 if (optlen < sizeof(req))
2137                         return -EINVAL;
2138                 if (copy_from_user(&req, optval, sizeof(req)))
2139                         return -EFAULT;
2140                 err = netlink_set_ring(sk, &req, false,
2141                                        optname == NETLINK_TX_RING);
2142                 break;
2143         }
2144 #endif /* CONFIG_NETLINK_MMAP */
2145         default:
2146                 err = -ENOPROTOOPT;
2147         }
2148         return err;
2149 }
2150 
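/*
 * Userspace counterpart (sketch): NETLINK_ADD_MEMBERSHIP handled above is
 * the only way to join groups past the first 32, since bind() can express
 * only a 32-bit nl_groups mask. SOL_NETLINK may need defining on older
 * libcs; 270 is its fixed value.
 */
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

static int join_group(int fd, unsigned int group)
{
        return setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                          &group, sizeof(group));
}
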
2151 static int netlink_getsockopt(struct socket *sock, int level, int optname,
2152                               char __user *optval, int __user *optlen)
2153 {
2154         struct sock *sk = sock->sk;
2155         struct netlink_sock *nlk = nlk_sk(sk);
2156         int len, val, err;
2157 
2158         if (level != SOL_NETLINK)
2159                 return -ENOPROTOOPT;
2160 
2161         if (get_user(len, optlen))
2162                 return -EFAULT;
2163         if (len < 0)
2164                 return -EINVAL;
2165 
2166         switch (optname) {
2167         case NETLINK_PKTINFO:
2168                 if (len < sizeof(int))
2169                         return -EINVAL;
2170                 len = sizeof(int);
2171                 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
2172                 if (put_user(len, optlen) ||
2173                     put_user(val, optval))
2174                         return -EFAULT;
2175                 err = 0;
2176                 break;
2177         case NETLINK_BROADCAST_ERROR:
2178                 if (len < sizeof(int))
2179                         return -EINVAL;
2180                 len = sizeof(int);
2181                 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2182                 if (put_user(len, optlen) ||
2183                     put_user(val, optval))
2184                         return -EFAULT;
2185                 err = 0;
2186                 break;
2187         case NETLINK_NO_ENOBUFS:
2188                 if (len < sizeof(int))
2189                         return -EINVAL;
2190                 len = sizeof(int);
2191                 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2192                 if (put_user(len, optlen) ||
2193                     put_user(val, optval))
2194                         return -EFAULT;
2195                 err = 0;
2196                 break;
2197         default:
2198                 err = -ENOPROTOOPT;
2199         }
2200         return err;
2201 }
2202 
2203 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2204 {
2205         struct nl_pktinfo info;
2206 
2207         info.group = NETLINK_CB(skb).dst_group;
2208         put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2209 }
2210 
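/*
 * Userspace counterpart (sketch) of the cmsg emitted above: with
 * NETLINK_PKTINFO enabled, recvmsg() carries a struct nl_pktinfo naming
 * the multicast group a message arrived on.
 */
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

static unsigned int group_of(struct msghdr *msg)
{
        struct cmsghdr *cmsg;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_NETLINK &&
                    cmsg->cmsg_type == NETLINK_PKTINFO) {
                        struct nl_pktinfo *pi = (void *)CMSG_DATA(cmsg);
                        return pi->group;
                }
        }
        return 0;       /* unicast, or pktinfo not enabled */
}
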
2211 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2212                            struct msghdr *msg, size_t len)
2213 {
2214         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2215         struct sock *sk = sock->sk;
2216         struct netlink_sock *nlk = nlk_sk(sk);
2217         struct sockaddr_nl *addr = msg->msg_name;
2218         u32 dst_portid;
2219         u32 dst_group;
2220         struct sk_buff *skb;
2221         int err;
2222         struct scm_cookie scm;
2223 
2224         if (msg->msg_flags&MSG_OOB)
2225                 return -EOPNOTSUPP;
2226 
2227         if (NULL == siocb->scm)
2228                 siocb->scm = &scm;
2229 
2230         err = scm_send(sock, msg, siocb->scm, true);
2231         if (err < 0)
2232                 return err;
2233 
2234         if (msg->msg_namelen) {
2235                 err = -EINVAL;
2236                 if (addr->nl_family != AF_NETLINK)
2237                         goto out;
2238                 dst_portid = addr->nl_pid;
2239                 dst_group = ffs(addr->nl_groups);
2240                 err =  -EPERM;
2241                 if ((dst_group || dst_portid) &&
2242                     !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
2243                         goto out;
2244         } else {
2245                 dst_portid = nlk->dst_portid;
2246                 dst_group = nlk->dst_group;
2247         }
2248 
2249         if (!nlk->portid) {
2250                 err = netlink_autobind(sock);
2251                 if (err)
2252                         goto out;
2253         }
2254 
2255         if (netlink_tx_is_mmaped(sk) &&
2256             msg->msg_iov->iov_base == NULL) {
2257                 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2258                                            siocb);
2259                 goto out;
2260         }
2261 
2262         err = -EMSGSIZE;
2263         if (len > sk->sk_sndbuf - 32)
2264                 goto out;
2265         err = -ENOBUFS;
2266         skb = netlink_alloc_large_skb(len, dst_group);
2267         if (skb == NULL)
2268                 goto out;
2269 
2270         NETLINK_CB(skb).portid  = nlk->portid;
2271         NETLINK_CB(skb).dst_group = dst_group;
2272         NETLINK_CB(skb).creds   = siocb->scm->creds;
2273 
2274         err = -EFAULT;
2275         if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2276                 kfree_skb(skb);
2277                 goto out;
2278         }
2279 
2280         err = security_netlink_send(sk, skb);
2281         if (err) {
2282                 kfree_skb(skb);
2283                 goto out;
2284         }
2285 
2286         if (dst_group) {
2287                 atomic_inc(&skb->users);
2288                 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
2289         }
2290         err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
2291 
2292 out:
2293         scm_destroy(siocb->scm);
2294         return err;
2295 }
2296 
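/*
 * Userspace sender sketch matching the path above (NLMSG_DONE is just a
 * placeholder type; payloads are assumed to fit the 1024-byte buffer).
 * msg_name selects the destination exactly as parsed above: nl_pid 0
 * targets the kernel, nl_groups a multicast group.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int send_to_kernel(int fd, const void *payload, int len)
{
        char buf[NLMSG_SPACE(1024)];
        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
        struct sockaddr_nl dst;
        struct iovec iov;
        struct msghdr msg;

        memset(buf, 0, sizeof(buf));
        nlh->nlmsg_len = NLMSG_LENGTH(len);
        nlh->nlmsg_type = NLMSG_DONE;
        nlh->nlmsg_flags = NLM_F_REQUEST;
        memcpy(NLMSG_DATA(nlh), payload, len);

        memset(&dst, 0, sizeof(dst));
        dst.nl_family = AF_NETLINK;     /* nl_pid 0: the kernel */

        memset(&msg, 0, sizeof(msg));
        iov.iov_base = nlh;
        iov.iov_len = nlh->nlmsg_len;
        msg.msg_name = &dst;
        msg.msg_namelen = sizeof(dst);
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        return sendmsg(fd, &msg, 0);
}
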
2297 static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2298                            struct msghdr *msg, size_t len,
2299                            int flags)
2300 {
2301         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2302         struct scm_cookie scm;
2303         struct sock *sk = sock->sk;
2304         struct netlink_sock *nlk = nlk_sk(sk);
2305         int noblock = flags&MSG_DONTWAIT;
2306         size_t copied;
2307         struct sk_buff *skb, *data_skb;
2308         int err, ret;
2309 
2310         if (flags&MSG_OOB)
2311                 return -EOPNOTSUPP;
2312 
2313         copied = 0;
2314 
2315         skb = skb_recv_datagram(sk, flags, noblock, &err);
2316         if (skb == NULL)
2317                 goto out;
2318 
2319         data_skb = skb;
2320 
2321 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2322         if (unlikely(skb_shinfo(skb)->frag_list)) {
2323                 /*
2324                  * If this skb has a frag_list, then we will have to use the
2325                  * frag_list skb's data for compat tasks and the regular skb's
2326                  * data for normal (non-compat) tasks.
2327                  *
2328                  * If we need to send the compat skb, assign it to the
2329                  * 'data_skb' variable so that it will be used below for data
2330                  * copying. We keep 'skb' for everything else, including
2331                  * freeing both later.
2332                  */
2333                 if (flags & MSG_CMSG_COMPAT)
2334                         data_skb = skb_shinfo(skb)->frag_list;
2335         }
2336 #endif
2337 
2338         copied = data_skb->len;
2339         if (len < copied) {
2340                 msg->msg_flags |= MSG_TRUNC;
2341                 copied = len;
2342         }
2343 
2344         skb_reset_transport_header(data_skb);
2345         err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
2346 
2347         if (msg->msg_name) {
2348                 struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
2349                 addr->nl_family = AF_NETLINK;
2350                 addr->nl_pad    = 0;
2351                 addr->nl_pid    = NETLINK_CB(skb).portid;
2352                 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
2353                 msg->msg_namelen = sizeof(*addr);
2354         }
2355 
2356         if (nlk->flags & NETLINK_RECV_PKTINFO)
2357                 netlink_cmsg_recv_pktinfo(msg, skb);
2358 
2359         if (NULL == siocb->scm) {
2360                 memset(&scm, 0, sizeof(scm));
2361                 siocb->scm = &scm;
2362         }
2363         siocb->scm->creds = *NETLINK_CREDS(skb);
2364         if (flags & MSG_TRUNC)
2365                 copied = data_skb->len;
2366 
2367         skb_free_datagram(sk, skb);
2368 
2369         if (nlk->cb_running &&
2370             atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2371                 ret = netlink_dump(sk);
2372                 if (ret) {
2373                         sk->sk_err = ret;
2374                         sk->sk_error_report(sk);
2375                 }
2376         }
2377 
2378         scm_recv(sock, msg, siocb->scm, flags);
2379 out:
2380         netlink_rcv_wake(sk);
2381         return err ? : copied;
2382 }
2383 
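/*
 * Userspace receive loop sketch for the path above, using the standard
 * NLMSG_OK/NLMSG_NEXT iteration; one datagram may carry several messages
 * (NLM_F_MULTI dump replies in particular, terminated by NLMSG_DONE).
 */
#include <sys/socket.h>
#include <linux/netlink.h>

static void recv_and_parse(int fd)
{
        char buf[8192];
        int len = recv(fd, buf, sizeof(buf), 0);
        struct nlmsghdr *nlh;

        if (len <= 0)
                return;
        for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
             nlh = NLMSG_NEXT(nlh, len)) {
                if (nlh->nlmsg_type == NLMSG_DONE)
                        break;          /* end of a multipart dump */
                if (nlh->nlmsg_type == NLMSG_ERROR)
                        break;          /* ack or error; payload has details */
                /* ... handle NLMSG_DATA(nlh) ... */
        }
}
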
2384 static void netlink_data_ready(struct sock *sk, int len)
2385 {
2386         BUG();
2387 }
2388 
2389 /*
2390  *      We export these functions to other modules. They provide a
2391  *      complete set of kernel non-blocking support for message
2392  *      queueing.
2393  */
2394 
2395 struct sock *
2396 __netlink_kernel_create(struct net *net, int unit, struct module *module,
2397                         struct netlink_kernel_cfg *cfg)
2398 {
2399         struct socket *sock;
2400         struct sock *sk;
2401         struct netlink_sock *nlk;
2402         struct listeners *listeners = NULL;
2403         struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2404         unsigned int groups;
2405 
2406         BUG_ON(!nl_table);
2407 
2408         if (unit < 0 || unit >= MAX_LINKS)
2409                 return NULL;
2410 
2411         if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2412                 return NULL;
2413 
2414         /*
2415          * We only need to hold a reference on the net from sk, but must not
2416          * get_net it. Besides, we cannot get and then put the net here.
2417          * So we create the socket inside init_net and then move it to net.
2418          */
2419 
2420         if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2421                 goto out_sock_release_nosk;
2422 
2423         sk = sock->sk;
2424         sk_change_net(sk, net);
2425 
2426         if (!cfg || cfg->groups < 32)
2427                 groups = 32;
2428         else
2429                 groups = cfg->groups;
2430 
2431         listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2432         if (!listeners)
2433                 goto out_sock_release;
2434 
2435         sk->sk_data_ready = netlink_data_ready;
2436         if (cfg && cfg->input)
2437                 nlk_sk(sk)->netlink_rcv = cfg->input;
2438 
2439         if (netlink_insert(sk, net, 0))
2440                 goto out_sock_release;
2441 
2442         nlk = nlk_sk(sk);
2443         nlk->flags |= NETLINK_KERNEL_SOCKET;
2444 
2445         netlink_table_grab();
2446         if (!nl_table[unit].registered) {
2447                 nl_table[unit].groups = groups;
2448                 rcu_assign_pointer(nl_table[unit].listeners, listeners);
2449                 nl_table[unit].cb_mutex = cb_mutex;
2450                 nl_table[unit].module = module;
2451                 if (cfg) {
2452                         nl_table[unit].bind = cfg->bind;
2453                         nl_table[unit].flags = cfg->flags;
2454                         if (cfg->compare)
2455                                 nl_table[unit].compare = cfg->compare;
2456                 }
2457                 nl_table[unit].registered = 1;
2458         } else {
2459                 kfree(listeners);
2460                 nl_table[unit].registered++;
2461         }
2462         netlink_table_ungrab();
2463         return sk;
2464 
2465 out_sock_release:
2466         kfree(listeners);
2467         netlink_kernel_release(sk);
2468         return NULL;
2469 
2470 out_sock_release_nosk:
2471         sock_release(sock);
2472         return NULL;
2473 }
2474 EXPORT_SYMBOL(__netlink_kernel_create);
2475 
2476 void
2477 netlink_kernel_release(struct sock *sk)
2478 {
2479         sk_release_kernel(sk);
2480 }
2481 EXPORT_SYMBOL(netlink_kernel_release);
2482 
2483 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2484 {
2485         struct listeners *new, *old;
2486         struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2487 
2488         if (groups < 32)
2489                 groups = 32;
2490 
2491         if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2492                 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2493                 if (!new)
2494                         return -ENOMEM;
2495                 old = nl_deref_protected(tbl->listeners);
2496                 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2497                 rcu_assign_pointer(tbl->listeners, new);
2498 
2499                 kfree_rcu(old, rcu);
2500         }
2501         tbl->groups = groups;
2502 
2503         return 0;
2504 }
2505 
2506 /**
2507  * netlink_change_ngroups - change number of multicast groups
2508  *
2509  * This changes the number of multicast groups that are available
2510  * on a certain netlink family. Note that it is not possible to
2511  * change the number of groups to below 32. Also note that it does
2512  * not implicitly call netlink_clear_multicast_users() when the
2513  * number of groups is reduced.
2514  *
2515  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2516  * @groups: The new number of groups.
2517  */
2518 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2519 {
2520         int err;
2521 
2522         netlink_table_grab();
2523         err = __netlink_change_ngroups(sk, groups);
2524         netlink_table_ungrab();
2525 
2526         return err;
2527 }
2528 
2529 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2530 {
2531         struct sock *sk;
2532         struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2533 
2534         sk_for_each_bound(sk, &tbl->mc_list)
2535                 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2536 }
2537 
2538 /**
2539  * netlink_clear_multicast_users - kick all listeners off a multicast group
2540  *
2541  * This function removes all listeners from the given group.
2542  * @ksk: The kernel netlink socket, as returned by
2543  *      netlink_kernel_create().
2544  * @group: The multicast group to clear.
2545  */
2546 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2547 {
2548         netlink_table_grab();
2549         __netlink_clear_multicast_users(ksk, group);
2550         netlink_table_ungrab();
2551 }
2552 
2553 struct nlmsghdr *
2554 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2555 {
2556         struct nlmsghdr *nlh;
2557         int size = nlmsg_msg_size(len);
2558 
2559         nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
2560         nlh->nlmsg_type = type;
2561         nlh->nlmsg_len = size;
2562         nlh->nlmsg_flags = flags;
2563         nlh->nlmsg_pid = portid;
2564         nlh->nlmsg_seq = seq;
2565         if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2566                 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2567         return nlh;
2568 }
2569 EXPORT_SYMBOL(__nlmsg_put);
2570 
2571 /*
2572  * It looks a bit ugly.
2573  * It would be better to create a kernel thread.
2574  */
2575 
2576 static int netlink_dump(struct sock *sk)
2577 {
2578         struct netlink_sock *nlk = nlk_sk(sk);
2579         struct netlink_callback *cb;
2580         struct sk_buff *skb = NULL;
2581         struct nlmsghdr *nlh;
2582         int len, err = -ENOBUFS;
2583         int alloc_size;
2584 
2585         mutex_lock(nlk->cb_mutex);
2586         if (!nlk->cb_running) {
2587                 err = -EINVAL;
2588                 goto errout_skb;
2589         }
2590 
2591         cb = &nlk->cb;
2592         alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2593 
2594         if (!netlink_rx_is_mmaped(sk) &&
2595             atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2596                 goto errout_skb;
2597         skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
2598         if (!skb)
2599                 goto errout_skb;
2600         netlink_skb_set_owner_r(skb, sk);
2601 
2602         len = cb->dump(skb, cb);
2603 
2604         if (len > 0) {
2605                 mutex_unlock(nlk->cb_mutex);
2606 
2607                 if (sk_filter(sk, skb))
2608                         kfree_skb(skb);
2609                 else
2610                         __netlink_sendskb(sk, skb);
2611                 return 0;
2612         }
2613 
2614         nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2615         if (!nlh)
2616                 goto errout_skb;
2617 
2618         nl_dump_check_consistent(cb, nlh);
2619 
2620         memcpy(nlmsg_data(nlh), &len, sizeof(len));
2621 
2622         if (sk_filter(sk, skb))
2623                 kfree_skb(skb);
2624         else
2625                 __netlink_sendskb(sk, skb);
2626 
2627         if (cb->done)
2628                 cb->done(cb);
2629 
2630         nlk->cb_running = false;
2631         mutex_unlock(nlk->cb_mutex);
2632         module_put(cb->module);
2633         consume_skb(cb->skb);
2634         return 0;
2635 
2636 errout_skb:
2637         mutex_unlock(nlk->cb_mutex);
2638         kfree_skb(skb);
2639         return err;
2640 }
2641 
2642 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2643                          const struct nlmsghdr *nlh,
2644                          struct netlink_dump_control *control)
2645 {
2646         struct netlink_callback *cb;
2647         struct sock *sk;
2648         struct netlink_sock *nlk;
2649         int ret;
2650 
2651         /* Memory mapped dump requests need to be copied to avoid looping
2652          * on the pending state in netlink_mmap_sendmsg() while the CB holds
2653          * a reference to the skb.
2654          */
2655         if (netlink_skb_is_mmaped(skb)) {
2656                 skb = skb_copy(skb, GFP_KERNEL);
2657                 if (skb == NULL)
2658                         return -ENOBUFS;
2659         } else
2660                 atomic_inc(&skb->users);
2661 
2662         sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2663         if (sk == NULL) {
2664                 ret = -ECONNREFUSED;
2665                 goto error_free;
2666         }
2667 
2668         nlk = nlk_sk(sk);
2669         mutex_lock(nlk->cb_mutex);
2670         /* A dump is in progress... */
2671         if (nlk->cb_running) {
2672                 ret = -EBUSY;
2673                 goto error_unlock;
2674         }
2675         /* add reference of module which cb->dump belongs to */
2676         if (!try_module_get(control->module)) {
2677                 ret = -EPROTONOSUPPORT;
2678                 goto error_unlock;
2679         }
2680 
2681         cb = &nlk->cb;
2682         memset(cb, 0, sizeof(*cb));
2683         cb->dump = control->dump;
2684         cb->done = control->done;
2685         cb->nlh = nlh;
2686         cb->data = control->data;
2687         cb->module = control->module;
2688         cb->min_dump_alloc = control->min_dump_alloc;
2689         cb->skb = skb;
2690 
2691         nlk->cb_running = true;
2692 
2693         mutex_unlock(nlk->cb_mutex);
2694 
2695         ret = netlink_dump(sk);
2696         sock_put(sk);
2697 
2698         if (ret)
2699                 return ret;
2700 
2701         /* We successfully started a dump; by returning -EINTR we
2702          * signal that no ACK should be sent even if it was requested.
2703          */
2704         return -EINTR;
2705 
2706 error_unlock:
2707         sock_put(sk);
2708         mutex_unlock(nlk->cb_mutex);
2709 error_free:
2710         kfree_skb(skb);
2711         return ret;
2712 }
2713 EXPORT_SYMBOL(__netlink_dump_start);
2714 
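/*
 * In-kernel usage sketch (hypothetical my_dump/my_done callbacks): request
 * handlers usually enter this machinery through the netlink_dump_start()
 * wrapper around __netlink_dump_start(). The -EINTR convention above means
 * "dump started, suppress the automatic ACK".
 */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb);
static int my_done(struct netlink_callback *cb);

static int example_handle_request(struct sock *ssk, struct sk_buff *skb,
                                  struct nlmsghdr *nlh)
{
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
                        .dump = my_dump,        /* fills one skb per call */
                        .done = my_done,        /* optional cleanup */
                };
                return netlink_dump_start(ssk, skb, nlh, &c);
        }
        return -EOPNOTSUPP;     /* non-dump handling not shown */
}
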
2715 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2716 {
2717         struct sk_buff *skb;
2718         struct nlmsghdr *rep;
2719         struct nlmsgerr *errmsg;
2720         size_t payload = sizeof(*errmsg);
2721 
2722         /* error messages get the original request appended */
2723         if (err)
2724                 payload += nlmsg_len(nlh);
2725 
2726         skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2727                                 NETLINK_CB(in_skb).portid, GFP_KERNEL);
2728         if (!skb) {
2729                 struct sock *sk;
2730 
2731                 sk = netlink_lookup(sock_net(in_skb->sk),
2732                                     in_skb->sk->sk_protocol,
2733                                     NETLINK_CB(in_skb).portid);
2734                 if (sk) {
2735                         sk->sk_err = ENOBUFS;
2736                         sk->sk_error_report(sk);
2737                         sock_put(sk);
2738                 }
2739                 return;
2740         }
2741 
2742         rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2743                           NLMSG_ERROR, payload, 0);
2744         errmsg = nlmsg_data(rep);
2745         errmsg->error = err;
2746         memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
2747         netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2748 }
2749 EXPORT_SYMBOL(netlink_ack);
2750 
2751 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2752                                                      struct nlmsghdr *))
2753 {
2754         struct nlmsghdr *nlh;
2755         int err;
2756 
2757         while (skb->len >= nlmsg_total_size(0)) {
2758                 int msglen;
2759 
2760                 nlh = nlmsg_hdr(skb);
2761                 err = 0;
2762 
2763                 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2764                         return 0;
2765 
2766                 /* Only requests are handled by the kernel */
2767                 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2768                         goto ack;
2769 
2770                 /* Skip control messages */
2771                 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2772                         goto ack;
2773 
2774                 err = cb(skb, nlh);
2775                 if (err == -EINTR)
2776                         goto skip;
2777 
2778 ack:
2779                 if (nlh->nlmsg_flags & NLM_F_ACK || err)
2780                         netlink_ack(skb, nlh, err);
2781 
2782 skip:
2783                 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2784                 if (msglen > skb->len)
2785                         msglen = skb->len;
2786                 skb_pull(skb, msglen);
2787         }
2788 
2789         return 0;
2790 }
2791 EXPORT_SYMBOL(netlink_rcv_skb);
2792 
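/*
 * In-kernel glue sketch (hypothetical names): a kernel socket's input
 * callback usually funnels straight into netlink_rcv_skb(), which walks
 * the datagram, skips non-requests and acks on the handler's behalf.
 */
static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* dispatch on nlh->nlmsg_type; a negative return becomes an
         * error ack via netlink_ack() in the loop above */
        return 0;
}

static void example_input(struct sk_buff *skb)
{
        netlink_rcv_skb(skb, &example_rcv_msg);
}
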
2793 /**
2794  * nlmsg_notify - send a notification netlink message
2795  * @sk: netlink socket to use
2796  * @skb: notification message
2797  * @portid: destination netlink portid for reports or 0
2798  * @group: destination multicast group or 0
2799  * @report: 1 to report back, 0 to disable
2800  * @flags: allocation flags
2801  */
2802 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2803                  unsigned int group, int report, gfp_t flags)
2804 {
2805         int err = 0;
2806 
2807         if (group) {
2808                 int exclude_portid = 0;
2809 
2810                 if (report) {
2811                         atomic_inc(&skb->users);
2812                         exclude_portid = portid;
2813                 }
2814 
2815                 /* errors are reported via the destination sk->sk_err, but
2816                  * delivery errors propagate if NETLINK_BROADCAST_ERROR is set */
2817                 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2818         }
2819 
2820         if (report) {
2821                 int err2;
2822 
2823                 err2 = nlmsg_unicast(sk, skb, portid);
2824                 if (!err || err == -ESRCH)
2825                         err = err2;
2826         }
2827 
2828         return err;
2829 }
2830 EXPORT_SYMBOL(nlmsg_notify);
2831 
2832 #ifdef CONFIG_PROC_FS
2833 struct nl_seq_iter {
2834         struct seq_net_private p;
2835         int link;
2836         int hash_idx;
2837 };
2838 
2839 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2840 {
2841         struct nl_seq_iter *iter = seq->private;
2842         int i, j;
2843         struct sock *s;
2844         loff_t off = 0;
2845 
2846         for (i = 0; i < MAX_LINKS; i++) {
2847                 struct nl_portid_hash *hash = &nl_table[i].hash;
2848 
2849                 for (j = 0; j <= hash->mask; j++) {
2850                         sk_for_each(s, &hash->table[j]) {
2851                                 if (sock_net(s) != seq_file_net(seq))
2852                                         continue;
2853                                 if (off == pos) {
2854                                         iter->link = i;
2855                                         iter->hash_idx = j;
2856                                         return s;
2857                                 }
2858                                 ++off;
2859                         }
2860                 }
2861         }
2862         return NULL;
2863 }
2864 
2865 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
2866         __acquires(nl_table_lock)
2867 {
2868         read_lock(&nl_table_lock);
2869         return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2870 }
2871 
2872 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2873 {
2874         struct sock *s;
2875         struct nl_seq_iter *iter;
2876         struct net *net;
2877         int i, j;
2878 
2879         ++*pos;
2880 
2881         if (v == SEQ_START_TOKEN)
2882                 return netlink_seq_socket_idx(seq, 0);
2883 
2884         net = seq_file_net(seq);
2885         iter = seq->private;
2886         s = v;
2887         do {
2888                 s = sk_next(s);
2889         } while (s && !nl_table[s->sk_protocol].compare(net, s));
2890         if (s)
2891                 return s;
2892 
2893         i = iter->link;
2894         j = iter->hash_idx + 1;
2895 
2896         do {
2897                 struct nl_portid_hash *hash = &nl_table[i].hash;
2898 
2899                 for (; j <= hash->mask; j++) {
2900                         s = sk_head(&hash->table[j]);
2901 
2902                         while (s && !nl_table[s->sk_protocol].compare(net, s))
2903                                 s = sk_next(s);
2904                         if (s) {
2905                                 iter->link = i;
2906                                 iter->hash_idx = j;
2907                                 return s;
2908                         }
2909                 }
2910 
2911                 j = 0;
2912         } while (++i < MAX_LINKS);
2913 
2914         return NULL;
2915 }
2916 
2917 static void netlink_seq_stop(struct seq_file *seq, void *v)
2918         __releases(nl_table_lock)
2919 {
2920         read_unlock(&nl_table_lock);
2921 }
2922 
2923 
2924 static int netlink_seq_show(struct seq_file *seq, void *v)
2925 {
2926         if (v == SEQ_START_TOKEN) {
2927                 seq_puts(seq,
2928                          "sk       Eth Pid    Groups   "
2929                          "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
2930         } else {
2931                 struct sock *s = v;
2932                 struct netlink_sock *nlk = nlk_sk(s);
2933 
2934                 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
2935                            s,
2936                            s->sk_protocol,
2937                            nlk->portid,
2938                            nlk->groups ? (u32)nlk->groups[0] : 0,
2939                            sk_rmem_alloc_get(s),
2940                            sk_wmem_alloc_get(s),
2941                            nlk->cb_running,
2942                            atomic_read(&s->sk_refcnt),
2943                            atomic_read(&s->sk_drops),
2944                            sock_i_ino(s)
2945                         );
2946 
2947         }
2948         return 0;
2949 }
2950 
2951 static const struct seq_operations netlink_seq_ops = {
2952         .start  = netlink_seq_start,
2953         .next   = netlink_seq_next,
2954         .stop   = netlink_seq_stop,
2955         .show   = netlink_seq_show,
2956 };
2957 
2958 
2959 static int netlink_seq_open(struct inode *inode, struct file *file)
2960 {
2961         return seq_open_net(inode, file, &netlink_seq_ops,
2962                                 sizeof(struct nl_seq_iter));
2963 }
2964 
2965 static const struct file_operations netlink_seq_fops = {
2966         .owner          = THIS_MODULE,
2967         .open           = netlink_seq_open,
2968         .read           = seq_read,
2969         .llseek         = seq_lseek,
2970         .release        = seq_release_net,
2971 };
2972 
2973 #endif
2974 
2975 int netlink_register_notifier(struct notifier_block *nb)
2976 {
2977         return atomic_notifier_chain_register(&netlink_chain, nb);
2978 }
2979 EXPORT_SYMBOL(netlink_register_notifier);
2980 
2981 int netlink_unregister_notifier(struct notifier_block *nb)
2982 {
2983         return atomic_notifier_chain_unregister(&netlink_chain, nb);
2984 }
2985 EXPORT_SYMBOL(netlink_unregister_notifier);
2986 
2987 static const struct proto_ops netlink_ops = {
2988         .family =       PF_NETLINK,
2989         .owner =        THIS_MODULE,
2990         .release =      netlink_release,
2991         .bind =         netlink_bind,
2992         .connect =      netlink_connect,
2993         .socketpair =   sock_no_socketpair,
2994         .accept =       sock_no_accept,
2995         .getname =      netlink_getname,
2996         .poll =         netlink_poll,
2997         .ioctl =        sock_no_ioctl,
2998         .listen =       sock_no_listen,
2999         .shutdown =     sock_no_shutdown,
3000         .setsockopt =   netlink_setsockopt,
3001         .getsockopt =   netlink_getsockopt,
3002         .sendmsg =      netlink_sendmsg,
3003         .recvmsg =      netlink_recvmsg,
3004         .mmap =         netlink_mmap,
3005         .sendpage =     sock_no_sendpage,
3006 };
3007 
3008 static const struct net_proto_family netlink_family_ops = {
3009         .family = PF_NETLINK,
3010         .create = netlink_create,
3011         .owner  = THIS_MODULE,  /* for consistency 8) */
3012 };
3013 
3014 static int __net_init netlink_net_init(struct net *net)
3015 {
3016 #ifdef CONFIG_PROC_FS
3017         if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3018                 return -ENOMEM;
3019 #endif
3020         return 0;
3021 }
3022 
3023 static void __net_exit netlink_net_exit(struct net *net)
3024 {
3025 #ifdef CONFIG_PROC_FS
3026         remove_proc_entry("netlink", net->proc_net);
3027 #endif
3028 }
3029 
3030 static void __init netlink_add_usersock_entry(void)
3031 {
3032         struct listeners *listeners;
3033         int groups = 32;
3034 
3035         listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
3036         if (!listeners)
3037                 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3038 
3039         netlink_table_grab();
3040 
3041         nl_table[NETLINK_USERSOCK].groups = groups;
3042         rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3043         nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3044         nl_table[NETLINK_USERSOCK].registered = 1;
3045         nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3046 
3047         netlink_table_ungrab();
3048 }
3049 
3050 static struct pernet_operations __net_initdata netlink_net_ops = {
3051         .init = netlink_net_init,
3052         .exit = netlink_net_exit,
3053 };
3054 
3055 static int __init netlink_proto_init(void)
3056 {
3057         int i;
3058         unsigned long limit;
3059         unsigned int order;
3060         int err = proto_register(&netlink_proto, 0);
3061 
3062         if (err != 0)
3063                 goto out;
3064 
3065         BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
3066 
3067         nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3068         if (!nl_table)
3069                 goto panic;
3070 
3071         if (totalram_pages >= (128 * 1024))
3072                 limit = totalram_pages >> (21 - PAGE_SHIFT);
3073         else
3074                 limit = totalram_pages >> (23 - PAGE_SHIFT);
3075 
3076         order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
3077         limit = (1UL << order) / sizeof(struct hlist_head);
3078         order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
3079 
3080         for (i = 0; i < MAX_LINKS; i++) {
3081                 struct nl_portid_hash *hash = &nl_table[i].hash;
3082 
3083                 hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
3084                 if (!hash->table) {
3085                         while (i-- > 0)
3086                                 nl_portid_hash_free(nl_table[i].hash.table,
3087                                                  1 * sizeof(*hash->table));
3088                         kfree(nl_table);
3089                         goto panic;
3090                 }
3091                 hash->max_shift = order;
3092                 hash->shift = 0;
3093                 hash->mask = 0;
3094                 hash->rehash_time = jiffies;
3095 
3096                 nl_table[i].compare = netlink_compare;
3097         }
3098 
3099         INIT_LIST_HEAD(&netlink_tap_all);
3100 
3101         netlink_add_usersock_entry();
3102 
3103         sock_register(&netlink_family_ops);
3104         register_pernet_subsys(&netlink_net_ops);
3105         /* The netlink device handler may be needed early. */
3106         rtnetlink_init();
3107 out:
3108         return err;
3109 panic:
3110         panic("netlink_init: Cannot allocate nl_table\n");
3111 }
3112 
3113 core_initcall(netlink_proto_init);
3114 
