~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/unix/af_unix.c

Version: ~ [ linux-5.13-rc5 ] ~ [ linux-5.12.9 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.42 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.124 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.193 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.235 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.271 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.271 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * NET4:        Implementation of BSD Unix domain sockets.
  3  *
  4  * Authors:     Alan Cox, <alan@lxorguk.ukuu.org.uk>
  5  *
  6  *              This program is free software; you can redistribute it and/or
  7  *              modify it under the terms of the GNU General Public License
  8  *              as published by the Free Software Foundation; either version
  9  *              2 of the License, or (at your option) any later version.
 10  *
 11  * Fixes:
 12  *              Linus Torvalds  :       Assorted bug cures.
 13  *              Niibe Yutaka    :       async I/O support.
 14  *              Carsten Paeth   :       PF_UNIX check, address fixes.
 15  *              Alan Cox        :       Limit size of allocated blocks.
 16  *              Alan Cox        :       Fixed the stupid socketpair bug.
 17  *              Alan Cox        :       BSD compatibility fine tuning.
 18  *              Alan Cox        :       Fixed a bug in connect when interrupted.
 19  *              Alan Cox        :       Sorted out a proper draft version of
 20  *                                      file descriptor passing hacked up from
 21  *                                      Mike Shaver's work.
 22  *              Marty Leisner   :       Fixes to fd passing
 23  *              Nick Nevin      :       recvmsg bugfix.
 24  *              Alan Cox        :       Started proper garbage collector
 25  *              Heiko EiBfeldt  :       Missing verify_area check
 26  *              Alan Cox        :       Started POSIXisms
 27  *              Andreas Schwab  :       Replace inode by dentry for proper
 28  *                                      reference counting
 29  *              Kirk Petersen   :       Made this a module
 30  *          Christoph Rohland   :       Elegant non-blocking accept/connect algorithm.
 31  *                                      Lots of bug fixes.
 32  *           Alexey Kuznetsov   :       Repaired (I hope) bugs introduced
 33  *                                      by above two patches.
 34  *           Andrea Arcangeli   :       If possible we block in connect(2)
 35  *                                      if the max backlog of the listen socket
 36  *                                      has been reached. This won't break
 37  *                                      old apps and it will avoid huge amount
 38  *                                      of socks hashed (this for unix_gc()
 39  *                                      performances reasons).
 40  *                                      Security fix that limits the max
 41  *                                      number of socks to 2*max_files and
 42  *                                      the number of skb queueable in the
 43  *                                      dgram receiver.
 44  *              Artur Skawina   :       Hash function optimizations
 45  *           Alexey Kuznetsov   :       Full scale SMP. Lot of bugs are introduced 8)
 46  *            Malcolm Beattie   :       Set peercred for socketpair
 47  *           Michal Ostrowski   :       Module initialization cleanup.
 48  *           Arnaldo C. Melo    :       Remove MOD_{INC,DEC}_USE_COUNT,
 49  *                                      the core infrastructure is doing that
 50  *                                      for all net proto families now (2.5.69+)
 51  *
 52  *
 53  * Known differences from reference BSD that was tested:
 54  *
 55  *      [TO FIX]
 56  *      ECONNREFUSED is not returned from one end of a connected() socket to the
 57  *              other the moment one end closes.
 58  *      fstat() doesn't return st_dev=0, and give the blksize as high water mark
 59  *              and a fake inode identifier (nor the BSD first socket fstat twice bug).
 60  *      [NOT TO FIX]
 61  *      accept() returns a path name even if the connecting socket has closed
 62  *              in the meantime (BSD loses the path and gives up).
 63  *      accept() returns 0 length path for an unbound connector. BSD returns 16
 64  *              and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 65  *      socketpair(...SOCK_RAW..) doesn't panic the kernel.
 66  *      BSD af_unix apparently has connect forgetting to block properly.
 67  *              (need to check this with the POSIX spec in detail)
 68  *
 69  * Differences from 2.0.0-11-... (ANK)
 70  *      Bug fixes and improvements.
 71  *              - client shutdown killed server socket.
 72  *              - removed all useless cli/sti pairs.
 73  *
 74  *      Semantic changes/extensions.
 75  *              - generic control message passing.
 76  *              - SCM_CREDENTIALS control message.
 77  *              - "Abstract" (not FS based) socket bindings.
 78  *                Abstract names are sequences of bytes (not zero terminated)
 79  *                started by 0, so that this name space does not intersect
 80  *                with BSD names.
 81  */
 82 
 83 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 84 
 85 #include <linux/module.h>
 86 #include <linux/kernel.h>
 87 #include <linux/signal.h>
 88 #include <linux/sched.h>
 89 #include <linux/errno.h>
 90 #include <linux/string.h>
 91 #include <linux/stat.h>
 92 #include <linux/dcache.h>
 93 #include <linux/namei.h>
 94 #include <linux/socket.h>
 95 #include <linux/un.h>
 96 #include <linux/fcntl.h>
 97 #include <linux/termios.h>
 98 #include <linux/sockios.h>
 99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/net_namespace.h>
107 #include <net/sock.h>
108 #include <net/tcp_states.h>
109 #include <net/af_unix.h>
110 #include <linux/proc_fs.h>
111 #include <linux/seq_file.h>
112 #include <net/scm.h>
113 #include <linux/init.h>
114 #include <linux/poll.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119 #include <linux/freezer.h>
120 
/* Global hash table of unix sockets.  The first UNIX_HASH_SIZE buckets
 * hold bound sockets; the second UNIX_HASH_SIZE buckets hold unbound
 * ones (see unix_sockets_unbound() below).
 */
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
/* Protects unix_socket_table (see the SMP locking strategy comment below). */
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
/* Count of currently-allocated unix sockets, bounded in unix_create1(). */
static atomic_long_t unix_nr_socks;
127 
128 static struct hlist_head *unix_sockets_unbound(void *addr)
129 {
130         unsigned long hash = (unsigned long)addr;
131 
132         hash ^= hash >> 16;
133         hash ^= hash >> 8;
134         hash %= UNIX_HASH_SIZE;
135         return &unix_socket_table[UNIX_HASH_SIZE + hash];
136 }
137 
/* True if @sk is bound to an abstract (non-filesystem) name; abstract
 * addresses are the ones hashed into the first UNIX_HASH_SIZE buckets
 * (see unix_mkname()).  Assumes unix_sk(sk)->addr is non-NULL.
 */
#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
139 
#ifdef CONFIG_SECURITY_NETWORK
/* Stash the sender's LSM security ID from the scm cookie into the skb. */
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

/* Copy the security ID carried by the skb into the receiver's scm cookie. */
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

/* True if the skb carries the same security ID as the scm cookie. */
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

/* Without CONFIG_SECURITY_NETWORK every skb compares as equal. */
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */
167 
168 /*
169  *  SMP locking strategy:
170  *    hash table is protected with spinlock unix_table_lock
171  *    each socket state is protected by separate spin lock.
172  */
173 
174 static inline unsigned int unix_hash_fold(__wsum n)
175 {
176         unsigned int hash = (__force unsigned int)csum_fold(n);
177 
178         hash ^= hash>>8;
179         return hash&(UNIX_HASH_SIZE-1);
180 }
181 
#define unix_peer(sk) (unix_sk(sk)->peer)

/* True if @osk considers @sk to be its peer. */
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

/* @sk may send to @osk if @osk is unconnected, or connected back to @sk. */
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

/* True if @sk's receive queue has grown past its configured backlog. */
static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
198 
/* Return @s's peer with a reference held, or NULL if unconnected.
 * The state lock makes reading the peer and taking the reference
 * atomic with respect to concurrent connect/disconnect.
 * Caller must sock_put() the result.
 */
struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);
211 
212 static inline void unix_release_addr(struct unix_address *addr)
213 {
214         if (atomic_dec_and_test(&addr->refcnt))
215                 kfree(addr);
216 }
217 
218 /*
219  *      Check unix socket name:
220  *              - should be not zero length.
221  *              - if started by not zero, should be NULL terminated (FS object)
222  *              - if started by zero, it is abstract name.
223  */
224 
/* Validate and canonicalize a unix socket name of @len bytes.
 * Returns the effective address length, or -EINVAL on a bad name.
 * Filesystem names (sun_path[0] != 0) are NUL-terminated in place and
 * *hashp is left untouched; abstract names (leading NUL byte) have
 * their fold-hash stored in *hashp.
 */
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
247 
/* Unhash @sk from its bucket.  Caller holds unix_table_lock. */
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

/* Add @sk to hash bucket @list.  Caller holds unix_table_lock. */
static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}
258 
/* Locked wrappers around __unix_remove_socket()/__unix_insert_socket(). */
static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
272 
/* Look up a bound socket by address in bucket (hash ^ type), restricted
 * to network namespace @net.  Caller holds unix_table_lock; no reference
 * is taken on the result.  Returns NULL when there is no match.
 */
static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}
293 
/* As __unix_find_socket_byname(), but takes unix_table_lock itself and
 * returns the socket with a reference held (caller must sock_put()).
 */
static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
308 
/* Find the socket bound to filesystem inode @i, taking a reference on it
 * (caller must sock_put()).  Filesystem-bound sockets are hashed by
 * inode number.  Returns NULL if no socket is bound to @i.
 */
static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
328 
329 /* Support code for asymmetrically connected dgram sockets
330  *
331  * If a datagram socket is connected to a socket not itself connected
332  * to the first socket (eg, /dev/log), clients may only enqueue more
333  * messages if the present receive queue of the server socket is not
334  * "too large". This means there's a second writeability condition
335  * poll and sendmsg need to test. The dgram recv code will do a wake
336  * up on the peer_wait wait queue of a socket upon reception of a
337  * datagram which needs to be propagated to sleeping would-be writers
338  * since these might not have sent anything so far. This can't be
339  * accomplished via poll_wait because the lifetime of the server
340  * socket might be less than that of its clients if these break their
341  * association with it or if the server socket is closed while clients
342  * are still connected to it and there's no way to inform "a polling
343  * implementation" that it should let go of a certain wait queue
344  *
345  * In order to propagate a wake up, a wait_queue_t of the client
346  * socket is enqueued on the peer_wait queue of the server socket
347  * whose wake function does a wake_up on the ordinary client socket
348  * wait queue. This connection is established whenever a write (or
349  * poll for write) hit the flow control condition and broken when the
350  * association to the server socket is dissolved or after a wake up
351  * was relayed.
352  */
353 
/* Wake function installed on a server socket's peer_wait queue by
 * unix_dgram_peer_wake_connect().  Runs in standard waitqueue wake
 * context (under the server's peer_wait lock); it dequeues itself and
 * relays the wake-up to the client socket's own wait queue, so a
 * would-be writer blocked on the client gets woken.
 */
static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key);

	return 0;
}
373 
/* Enqueue @sk's relay entry on @other's peer_wait queue, so a future
 * datagram reception on @other wakes @sk via unix_dgram_peer_wake_relay().
 * Returns 1 if the association was newly established, 0 if @sk was
 * already enqueued (peer_wake.private already set).
 */
static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}
394 
/* Undo unix_dgram_peer_wake_connect(): remove @sk's relay entry from
 * @other's peer_wait queue, if it is still enqueued there (the relay
 * itself may already have dequeued it and cleared peer_wake.private).
 */
static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}
411 
/* Tear down the wake-up relay towards @other and wake any POLLOUT
 * sleepers on @sk, since the peer association they were waiting on
 * is gone.
 */
static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   POLLOUT |
				   POLLWRNORM |
				   POLLWRBAND);
}
421 
/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 *
 * Returns 1 (leaving the relay connected) when @other's receive queue is
 * still full, i.e. the caller should sleep and will be woken by the relay;
 * returns 0 when there is room, tearing the relay down again if this call
 * had just set it up.  Connecting before re-checking the queue closes the
 * race against a concurrent dgram reception on @other.
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	if (unix_recvq_full(other))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}
440 
441 static inline int unix_writable(struct sock *sk)
442 {
443         return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
444 }
445 
/* sk->sk_write_space callback: when the socket has become writable,
 * wake POLLOUT sleepers and deliver async (SIGIO) space notification.
 * sk_wq is dereferenced under RCU because the socket's wait queue can
 * go away concurrently.
 */
static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
460 
/* When dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from previous peer. First, it allows to do
 * flow control based only on wmem_alloc; second, sk connected to peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		/* Purging made room: wake writers blocked on our peer_wait. */
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of bidirectional dgram pipe is disconnected,
		 * we signal error. Messages are lost. Do not make this,
		 * when peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
481 
/* sk->sk_destruct callback: final cleanup when the last reference to a
 * unix sock is dropped.  Sanity-checks that the socket is already dead,
 * unhashed and has no write memory outstanding, then drops the bound
 * address and the global socket count.
 */
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}
508 
/* Tear down @sk on release: unhash it, orphan it, notify/disconnect its
 * peer, flush its receive queue and drop the filesystem path reference.
 * @embrion is nonzero when destroying a never-accepted socket still
 * sitting on a listener's queue (see the recursive call below); in that
 * case the peer is given ECONNRESET even with an empty receive queue.
 */
static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path	     = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* A listener's queued skbs carry embryonic sockets. */
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook	      */
		UNIXCB(skb).consumed = skb->len;
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What the above comment does talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}
582 
/* Record the current task's tgid and credentials as @sk's peer
 * credentials (reported via SO_PEERCRED), dropping any previously set
 * ones.  put_pid(NULL) is a no-op, so the pid drop is unconditional.
 */
static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}
591 
/* Copy @peersk's peer credentials onto @sk (taking references), dropping
 * whatever @sk held before.  Used so an accepted/connected socket carries
 * the credentials set up on its counterpart.
 */
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
600 
601 static int unix_listen(struct socket *sock, int backlog)
602 {
603         int err;
604         struct sock *sk = sock->sk;
605         struct unix_sock *u = unix_sk(sk);
606         struct pid *old_pid = NULL;
607 
608         err = -EOPNOTSUPP;
609         if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
610                 goto out;       /* Only stream/seqpacket sockets accept */
611         err = -EINVAL;
612         if (!u->addr)
613                 goto out;       /* No listens on an unbound socket */
614         unix_state_lock(sk);
615         if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
616                 goto out_unlock;
617         if (backlog > sk->sk_max_ack_backlog)
618                 wake_up_interruptible_all(&u->peer_wait);
619         sk->sk_max_ack_backlog  = backlog;
620         sk->sk_state            = TCP_LISTEN;
621         /* set credentials so connect can copy them */
622         init_peercred(sk);
623         err = 0;
624 
625 out_unlock:
626         unix_state_unlock(sk);
627         put_pid(old_pid);
628 out:
629         return err;
630 }
631 
632 static int unix_release(struct socket *);
633 static int unix_bind(struct socket *, struct sockaddr *, int);
634 static int unix_stream_connect(struct socket *, struct sockaddr *,
635                                int addr_len, int flags);
636 static int unix_socketpair(struct socket *, struct socket *);
637 static int unix_accept(struct socket *, struct socket *, int);
638 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
639 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
640 static unsigned int unix_dgram_poll(struct file *, struct socket *,
641                                     poll_table *);
642 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
643 static int unix_shutdown(struct socket *, int);
644 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
645 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
646 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
647                                     size_t size, int flags);
648 static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
649                                        struct pipe_inode_info *, size_t size,
650                                        unsigned int flags);
651 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
652 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
653 static int unix_dgram_connect(struct socket *, struct sockaddr *,
654                               int, int);
655 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
656 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
657                                   int);
658 
659 static int unix_set_peek_off(struct sock *sk, int val)
660 {
661         struct unix_sock *u = unix_sk(sk);
662 
663         if (mutex_lock_interruptible(&u->readlock))
664                 return -EINTR;
665 
666         sk->sk_peek_off = val;
667         mutex_unlock(&u->readlock);
668 
669         return 0;
670 }
671 
672 
/* proto_ops for SOCK_STREAM unix sockets. */
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	unix_set_peek_off,
};
695 
/* proto_ops for SOCK_DGRAM unix sockets: no accept/listen. */
static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
717 
/* proto_ops for SOCK_SEQPACKET unix sockets: connection-oriented like
 * stream (connect/accept/listen), but uses the dgram poll handler.
 */
static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
739 
/* Protocol definition shared by all AF_UNIX socket types. */
static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};
745 
746 /*
747  * AF_UNIX sockets do not interact with hardware, hence they
748  * dont trigger interrupts - so it's safe for them to have
749  * bh-unsafe locking for their sk_receive_queue.lock. Split off
750  * this special lock-class by reinitializing the spinlock key:
751  */
752 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
753 
/* Allocate and initialize a new unix sock in namespace @net, attach it
 * to @sock and hash it among the unbound sockets.  Enforces a global
 * cap of 2 * get_max_files() unix sockets.  Returns the new sock, or
 * NULL when the cap is hit or allocation fails.
 */
static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	/* Undo the optimistic count increment on any failure path. */
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}
794 
/* PF_UNIX entry point for socket(2): pick the ops table matching the
 * requested type and allocate the sock.  Returns 0 or a negative errno.
 */
static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		/* fall through - SOCK_RAW is served by the datagram code */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}
825 
826 static int unix_release(struct socket *sock)
827 {
828         struct sock *sk = sock->sk;
829 
830         if (!sk)
831                 return 0;
832 
833         unix_release_sock(sk, 0);
834         sock->sk = NULL;
835 
836         return 0;
837 }
838 
/* Bind the socket to an autogenerated abstract-namespace address of the
 * form "\0XXXXX".  Used when credentials must be passed but the caller
 * never bound the socket explicitly.  Returns 0 on success (including
 * the already-bound case) or a negative errno.
 */
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	/* readlock also serializes binding attempts on this socket. */
	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	/* sizeof(short) for sun_family plus room for "\0" + 5 hex digits */
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	/* Abstract namespace: sun_path[0] stays '\0', name follows it. */
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take long time if many names
		 * are already in use.
		 */
		cond_resched();
		/* Give up if all names seems to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	/* Move the sock from the unbound list onto its hash chain. */
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
899 
/* Look up the peer socket for an address.  Filesystem names go through
 * a path walk, permission check and inode lookup; abstract names go
 * through the name hash table.  On success returns a referenced sock;
 * on failure returns NULL and stores a negative errno in *error.
 */
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = d_backing_inode(path.dentry);
		/* Connecting/sending requires write permission on the node. */
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		/* Right node, wrong socket type: protocol mismatch. */
		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
954 
955 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
956 {
957         struct dentry *dentry;
958         struct path path;
959         int err = 0;
960         /*
961          * Get the parent directory, calculate the hash for last
962          * component.
963          */
964         dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
965         err = PTR_ERR(dentry);
966         if (IS_ERR(dentry))
967                 return err;
968 
969         /*
970          * All right, let's create it.
971          */
972         err = security_path_mknod(&path, dentry, mode, 0);
973         if (!err) {
974                 err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
975                 if (!err) {
976                         res->mnt = mntget(path.mnt);
977                         res->dentry = dget(dentry);
978                 }
979         }
980         done_path_create(&path, dentry);
981         return err;
982 }
983 
/* bind(2) for AF_UNIX.  A bare sun_family (addr_len == sizeof(short))
 * requests autobind.  Pathname addresses create an S_IFSOCK inode via
 * unix_mknod() and hash the socket by inode number; abstract addresses
 * only claim a name in the hash table.
 */
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;	/* unix_mkname() returns the canonical length */

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		goto out;

	/* Binding twice is not allowed. */
	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		/* Filesystem sockets are found by inode, not by name, so
		 * park addr->hash out of range and hash by inode number.
		 */
		addr->hash = UNIX_HASH_SIZE;
		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}
1068 
/* Take the state locks of two socks.  A NULL or repeated second sock
 * needs only one lock; otherwise lock in address order so concurrent
 * callers cannot deadlock (ABBA).
 */
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	struct sock *first, *second;

	if (!sk2 || sk1 == sk2) {
		unix_state_lock(sk1);
		return;
	}

	if (sk1 < sk2) {
		first = sk1;
		second = sk2;
	} else {
		first = sk2;
		second = sk1;
	}
	unix_state_lock(first);
	unix_state_lock_nested(second);
}
1083 
/* Release the locks taken by unix_state_double_lock(). */
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	unix_state_unlock(sk1);
	if (sk2 && sk1 != sk2)
		unix_state_unlock(sk2);
}
1093 
1094 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1095                               int alen, int flags)
1096 {
1097         struct sock *sk = sock->sk;
1098         struct net *net = sock_net(sk);
1099         struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1100         struct sock *other;
1101         unsigned int hash;
1102         int err;
1103 
1104         if (addr->sa_family != AF_UNSPEC) {
1105                 err = unix_mkname(sunaddr, alen, &hash);
1106                 if (err < 0)
1107                         goto out;
1108                 alen = err;
1109 
1110                 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1111                     !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1112                         goto out;
1113 
1114 restart:
1115                 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1116                 if (!other)
1117                         goto out;
1118 
1119                 unix_state_double_lock(sk, other);
1120 
1121                 /* Apparently VFS overslept socket death. Retry. */
1122                 if (sock_flag(other, SOCK_DEAD)) {
1123                         unix_state_double_unlock(sk, other);
1124                         sock_put(other);
1125                         goto restart;
1126                 }
1127 
1128                 err = -EPERM;
1129                 if (!unix_may_send(sk, other))
1130                         goto out_unlock;
1131 
1132                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1133                 if (err)
1134                         goto out_unlock;
1135 
1136         } else {
1137                 /*
1138                  *      1003.1g breaking connected state with AF_UNSPEC
1139                  */
1140                 other = NULL;
1141                 unix_state_double_lock(sk, other);
1142         }
1143 
1144         /*
1145          * If it was connected, reconnect.
1146          */
1147         if (unix_peer(sk)) {
1148                 struct sock *old_peer = unix_peer(sk);
1149                 unix_peer(sk) = other;
1150                 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1151 
1152                 unix_state_double_unlock(sk, other);
1153 
1154                 if (other != old_peer)
1155                         unix_dgram_disconnected(sk, old_peer);
1156                 sock_put(old_peer);
1157         } else {
1158                 unix_peer(sk) = other;
1159                 unix_state_double_unlock(sk, other);
1160         }
1161         return 0;
1162 
1163 out_unlock:
1164         unix_state_double_unlock(sk, other);
1165         sock_put(other);
1166 out:
1167         return err;
1168 }
1169 
/* Sleep (up to timeo jiffies) on the peer's peer_wait queue until its
 * receive queue may have drained.  Called with the peer's state lock
 * held; the lock is dropped before sleeping and NOT re-taken.  Returns
 * the remaining timeout.
 */
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	/* Decide whether to sleep while still holding the state lock:
	 * only if the peer is alive, still reading, and its queue is full.
	 */
	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
1190 
/* connect(2) for SOCK_STREAM/SOCK_SEQPACKET.  Allocates an embryo sock
 * (newsk) and a one-byte skb up front, then queues the skb - whose
 * skb->sk is the embryo - on the listener's receive queue for
 * unix_accept() to dequeue.  Returns 0 or a negative errno.
 */
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	/* Credential passing needs a bound source address. */
	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL, 0);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		/* unix_wait_for_peer() drops other's state lock. */
		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   It is tricky place. We need to grab our state lock and cannot
	   drop lock on peer. It is dangerous because deadlock is
	   possible. Connect to self case and simultaneous
	   attempt to connect are eliminated by checking socket
	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
	   check this before attempt to grab lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	/* Raced with another connect on this socket: start over. */
	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock*/
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
1365 
1366 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1367 {
1368         struct sock *ska = socka->sk, *skb = sockb->sk;
1369 
1370         /* Join our sockets back to back */
1371         sock_hold(ska);
1372         sock_hold(skb);
1373         unix_peer(ska) = skb;
1374         unix_peer(skb) = ska;
1375         init_peercred(ska);
1376         init_peercred(skb);
1377 
1378         if (ska->sk_type != SOCK_DGRAM) {
1379                 ska->sk_state = TCP_ESTABLISHED;
1380                 skb->sk_state = TCP_ESTABLISHED;
1381                 socka->state  = SS_CONNECTED;
1382                 sockb->state  = SS_CONNECTED;
1383         }
1384         return 0;
1385 }
1386 
1387 static void unix_sock_inherit_flags(const struct socket *old,
1388                                     struct socket *new)
1389 {
1390         if (test_bit(SOCK_PASSCRED, &old->flags))
1391                 set_bit(SOCK_PASSCRED, &new->flags);
1392         if (test_bit(SOCK_PASSSEC, &old->flags))
1393                 set_bit(SOCK_PASSSEC, &new->flags);
1394 }
1395 
/* accept(2): dequeue one connection skb left by unix_stream_connect();
 * skb->sk is the embryo sock, which gets grafted onto newsock.
 */
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	/* Let a connector blocked on our full backlog make progress. */
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
1438 
1439 
/* getsockname(2)/getpeername(2): copy the local (peer == 0) or remote
 * (peer != 0) address into *sunaddr.  An unbound socket reports just
 * sun_family with an empty path.  Returns 0 or -ENOTCONN when a peer
 * address is requested but none exists.
 */
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		/* unix_peer_get() returns a referenced peer or NULL. */
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}
1475 
/* Move the passed-fd list from the skb into the scm cookie and drop
 * the in-flight accounting taken by unix_attach_fds().
 */
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}
1486 
1487 static void unix_destruct_scm(struct sk_buff *skb)
1488 {
1489         struct scm_cookie scm;
1490         memset(&scm, 0, sizeof(scm));
1491         scm.pid  = UNIXCB(skb).pid;
1492         if (UNIXCB(skb).fp)
1493                 unix_detach_fds(&scm, skb);
1494 
1495         /* Alas, it calls VFS */
1496         /* So fscking what? fput() had been SMP-safe since the last Summer */
1497         scm_destroy(&scm);
1498         sock_wfree(skb);
1499 }
1500 
1501 #define MAX_RECURSION_LEVEL 4
1502 
/* Record the fds carried by scm in the skb, marking each AF_UNIX
 * socket among them as "in flight" for the garbage collector.
 * Returns the maximum recursion level seen (>= 0) or a negative errno.
 */
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	/* Cap how deeply sockets may be passed through sockets. */
	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection.  Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}
1536 
1537 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1538 {
1539         int err = 0;
1540 
1541         UNIXCB(skb).pid  = get_pid(scm->pid);
1542         UNIXCB(skb).uid = scm->creds.uid;
1543         UNIXCB(skb).gid = scm->creds.gid;
1544         UNIXCB(skb).fp = NULL;
1545         unix_get_secdata(scm, skb);
1546         if (scm->fp && send_fds)
1547                 err = unix_attach_fds(scm, skb);
1548 
1549         skb->destructor = unix_destruct_scm;
1550         return err;
1551 }
1552 
1553 static bool unix_passcred_enabled(const struct socket *sock,
1554                                   const struct sock *other)
1555 {
1556         return test_bit(SOCK_PASSCRED, &sock->flags) ||
1557                !other->sk_socket ||
1558                test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1559 }
1560 
1561 /*
1562  * Some apps rely on write() giving SCM_CREDENTIALS
1563  * We include credentials if source or destination socket
1564  * asserted SOCK_PASSCRED.
1565  */
1566 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1567                             const struct sock *other)
1568 {
1569         if (UNIXCB(skb).pid)
1570                 return;
1571         if (unix_passcred_enabled(sock, other)) {
1572                 UNIXCB(skb).pid  = get_pid(task_tgid(current));
1573                 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1574         }
1575 }
1576 
1577 static int maybe_init_creds(struct scm_cookie *scm,
1578                             struct socket *socket,
1579                             const struct sock *other)
1580 {
1581         int err;
1582         struct msghdr msg = { .msg_controllen = 0 };
1583 
1584         err = scm_send(socket, &msg, scm, false);
1585         if (err)
1586                 return err;
1587 
1588         if (unix_passcred_enabled(socket, other)) {
1589                 scm->pid = get_pid(task_tgid(current));
1590                 current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1591         }
1592         return err;
1593 }
1594 
1595 static bool unix_skb_scm_eq(struct sk_buff *skb,
1596                             struct scm_cookie *scm)
1597 {
1598         const struct unix_skb_parms *u = &UNIXCB(skb);
1599 
1600         return u->pid == scm->pid &&
1601                uid_eq(u->uid, scm->creds.uid) &&
1602                gid_eq(u->gid, scm->creds.gid) &&
1603                unix_secdata_eq(scm, skb);
1604 }
1605 
1606 /*
1607  *      Send AF_UNIX data.
1608  */
1609 
1610 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1611                               size_t len)
1612 {
1613         struct sock *sk = sock->sk;
1614         struct net *net = sock_net(sk);
1615         struct unix_sock *u = unix_sk(sk);
1616         DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1617         struct sock *other = NULL;
1618         int namelen = 0; /* fake GCC */
1619         int err;
1620         unsigned int hash;
1621         struct sk_buff *skb;
1622         long timeo;
1623         struct scm_cookie scm;
1624         int max_level;
1625         int data_len = 0;
1626         int sk_locked;
1627 
1628         wait_for_unix_gc();
1629         err = scm_send(sock, msg, &scm, false);
1630         if (err < 0)
1631                 return err;
1632 
1633         err = -EOPNOTSUPP;
1634         if (msg->msg_flags&MSG_OOB)
1635                 goto out;
1636 
1637         if (msg->msg_namelen) {
1638                 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1639                 if (err < 0)
1640                         goto out;
1641                 namelen = err;
1642         } else {
1643                 sunaddr = NULL;
1644                 err = -ENOTCONN;
1645                 other = unix_peer_get(sk);
1646                 if (!other)
1647                         goto out;
1648         }
1649 
1650         if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1651             && (err = unix_autobind(sock)) != 0)
1652                 goto out;
1653 
1654         err = -EMSGSIZE;
1655         if (len > sk->sk_sndbuf - 32)
1656                 goto out;
1657 
1658         if (len > SKB_MAX_ALLOC) {
1659                 data_len = min_t(size_t,
1660                                  len - SKB_MAX_ALLOC,
1661                                  MAX_SKB_FRAGS * PAGE_SIZE);
1662                 data_len = PAGE_ALIGN(data_len);
1663 
1664                 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1665         }
1666 
1667         skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1668                                    msg->msg_flags & MSG_DONTWAIT, &err,
1669                                    PAGE_ALLOC_COSTLY_ORDER);
1670         if (skb == NULL)
1671                 goto out;
1672 
1673         err = unix_scm_to_skb(&scm, skb, true);
1674         if (err < 0)
1675                 goto out_free;
1676         max_level = err + 1;
1677 
1678         skb_put(skb, len - data_len);
1679         skb->data_len = data_len;
1680         skb->len = len;
1681         err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1682         if (err)
1683                 goto out_free;
1684 
1685         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1686 
1687 restart:
1688         if (!other) {
1689                 err = -ECONNRESET;
1690                 if (sunaddr == NULL)
1691                         goto out_free;
1692 
1693                 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1694                                         hash, &err);
1695                 if (other == NULL)
1696                         goto out_free;
1697         }
1698 
1699         if (sk_filter(other, skb) < 0) {
1700                 /* Toss the packet but do not return any error to the sender */
1701                 err = len;
1702                 goto out_free;
1703         }
1704 
1705         sk_locked = 0;
1706         unix_state_lock(other);
1707 restart_locked:
1708         err = -EPERM;
1709         if (!unix_may_send(sk, other))
1710                 goto out_unlock;
1711 
1712         if (unlikely(sock_flag(other, SOCK_DEAD))) {
1713                 /*
1714                  *      Check with 1003.1g - what should
1715                  *      datagram error
1716                  */
1717                 unix_state_unlock(other);
1718                 sock_put(other);
1719 
1720                 if (!sk_locked)
1721                         unix_state_lock(sk);
1722 
1723                 err = 0;
1724                 if (unix_peer(sk) == other) {
1725                         unix_peer(sk) = NULL;
1726                         unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1727 
1728                         unix_state_unlock(sk);
1729 
1730                         unix_dgram_disconnected(sk, other);
1731                         sock_put(other);
1732                         err = -ECONNREFUSED;
1733                 } else {
1734                         unix_state_unlock(sk);
1735                 }
1736 
1737                 other = NULL;
1738                 if (err)
1739                         goto out_free;
1740                 goto restart;
1741         }
1742 
1743         err = -EPIPE;
1744         if (other->sk_shutdown & RCV_SHUTDOWN)
1745                 goto out_unlock;
1746 
1747         if (sk->sk_type != SOCK_SEQPACKET) {
1748                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1749                 if (err)
1750                         goto out_unlock;
1751         }
1752 
1753         if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1754                 if (timeo) {
1755                         timeo = unix_wait_for_peer(other, timeo);
1756 
1757                         err = sock_intr_errno(timeo);
1758                         if (signal_pending(current))
1759                                 goto out_free;
1760 
1761                         goto restart;
1762                 }
1763 
1764                 if (!sk_locked) {
1765                         unix_state_unlock(other);
1766                         unix_state_double_lock(sk, other);
1767                 }
1768 
1769                 if (unix_peer(sk) != other ||
1770                     unix_dgram_peer_wake_me(sk, other)) {
1771                         err = -EAGAIN;
1772                         sk_locked = 1;
1773                         goto out_unlock;
1774                 }
1775 
1776                 if (!sk_locked) {
1777                         sk_locked = 1;
1778                         goto restart_locked;
1779                 }
1780         }
1781 
1782         if (unlikely(sk_locked))
1783                 unix_state_unlock(sk);
1784 
1785         if (sock_flag(other, SOCK_RCVTSTAMP))
1786                 __net_timestamp(skb);
1787         maybe_add_creds(skb, sock, other);
1788         skb_queue_tail(&other->sk_receive_queue, skb);
1789         if (max_level > unix_sk(other)->recursion_level)
1790                 unix_sk(other)->recursion_level = max_level;
1791         unix_state_unlock(other);
1792         other->sk_data_ready(other);
1793         sock_put(other);
1794         scm_destroy(&scm);
1795         return len;
1796 
1797 out_unlock:
1798         if (sk_locked)
1799                 unix_state_unlock(sk);
1800         unix_state_unlock(other);
1801 out_free:
1802         kfree_skb(skb);
1803 out:
1804         if (other)
1805                 sock_put(other);
1806         scm_destroy(&scm);
1807         return err;
1808 }
1809 
1810 /* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
1812  */
1813 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
1814 
/*
 * sendmsg() for SOCK_STREAM AF_UNIX sockets: slice the user buffer into
 * paged skbs and append them to the connected peer's receive queue.
 *
 * Returns the number of bytes queued on success, or a negative errno.
 * Once at least one byte has been queued, a later failure reports the
 * partial byte count instead of the error (see the final return).
 */
static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie scm;
	bool fds_sent = false;	/* SCM_RIGHTS fds go only in the first skb */
	int max_level;
	int data_len;

	/* Let the in-flight-fd garbage collector run before we pin more. */
	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		/* Stream sockets take no destination address. */
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		/* Bytes beyond the linear head are stored in page frags. */
		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(&scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		/* err is the fd-passing recursion depth seen so far. */
		max_level = err + 1;
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		/* Peer may have died or shut down reading while we copied. */
		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

	scm_destroy(&scm);

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	/* Broken pipe raises SIGPIPE unless the caller opted out. */
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(&scm);
	return sent ? : err;
}
1917 
/*
 * sendpage() for SOCK_STREAM AF_UNIX sockets: attach @page as a page
 * fragment, either to the last skb already queued at the peer (only if
 * it carries the same scm credentials) or to a freshly allocated
 * zero-length skb which is then queued.
 *
 * Returns @size on success or a negative errno.
 */
static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
				    int offset, size_t size, int flags)
{
	int err;
	bool send_sigpipe = false;
	bool init_scm = true;	/* scm not yet initialized / needs destroy */
	struct scm_cookie scm;
	struct sock *other, *sk = socket->sk;
	struct sk_buff *skb, *newskb = NULL, *tail = NULL;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	other = unix_peer(sk);
	if (!other || sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (false) {
		/* Backward-goto target only: reached with both locks held
		 * when a fresh skb is needed.  Drop the locks (allocation
		 * may sleep), allocate, then retry the locking sequence
		 * below from the top.
		 */
alloc_skb:
		unix_state_unlock(other);
		mutex_unlock(&unix_sk(other)->readlock);
		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
					      &err, 0);
		if (!newskb)
			goto err;
	}

	/* we must acquire readlock as we modify already present
	 * skbs in the sk_receive_queue and mess with skb->len
	 */
	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
	if (err) {
		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
		goto err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_unlock;
	}

	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_state_unlock;
	}

	if (init_scm) {
		/* Attach credentials only once, even across goto retries. */
		err = maybe_init_creds(&scm, socket, other);
		if (err)
			goto err_state_unlock;
		init_scm = false;
	}

	/* Pick the skb to append to: reuse the queue tail when it belongs
	 * to us (same scm), otherwise use/allocate a fresh one.
	 */
	skb = skb_peek_tail(&other->sk_receive_queue);
	if (tail && tail == skb) {
		/* Tail unchanged since the failed append; use the new skb. */
		skb = newskb;
	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
		if (newskb) {
			skb = newskb;
		} else {
			tail = skb;
			goto alloc_skb;
		}
	} else if (newskb) {
		/* Fast path: the tail skb is usable after all.  Drop the
		 * speculatively allocated one (consume_skb(NULL) would be
		 * harmless too).
		 */
		consume_skb(newskb);
		newskb = NULL;
	}

	if (skb_append_pagefrags(skb, page, offset, size)) {
		/* No frag slot left; retry with a freshly allocated skb. */
		tail = skb;
		goto alloc_skb;
	}

	/* Account the appended page against the sender's write memory. */
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	atomic_add(size, &sk->sk_wmem_alloc);

	if (newskb) {
		err = unix_scm_to_skb(&scm, skb, false);
		if (err)
			goto err_state_unlock;
		spin_lock(&other->sk_receive_queue.lock);
		__skb_queue_tail(&other->sk_receive_queue, newskb);
		spin_unlock(&other->sk_receive_queue.lock);
	}

	unix_state_unlock(other);
	mutex_unlock(&unix_sk(other)->readlock);

	other->sk_data_ready(other);
	scm_destroy(&scm);
	return size;

err_state_unlock:
	unix_state_unlock(other);
err_unlock:
	mutex_unlock(&unix_sk(other)->readlock);
err:
	kfree_skb(newskb);
	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	if (!init_scm)
		scm_destroy(&scm);
	return err;
}
2033 
2034 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2035                                   size_t len)
2036 {
2037         int err;
2038         struct sock *sk = sock->sk;
2039 
2040         err = sock_error(sk);
2041         if (err)
2042                 return err;
2043 
2044         if (sk->sk_state != TCP_ESTABLISHED)
2045                 return -ENOTCONN;
2046 
2047         if (msg->msg_namelen)
2048                 msg->msg_namelen = 0;
2049 
2050         return unix_dgram_sendmsg(sock, msg, len);
2051 }
2052 
2053 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2054                                   size_t size, int flags)
2055 {
2056         struct sock *sk = sock->sk;
2057 
2058         if (sk->sk_state != TCP_ESTABLISHED)
2059                 return -ENOTCONN;
2060 
2061         return unix_dgram_recvmsg(sock, msg, size, flags);
2062 }
2063 
2064 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2065 {
2066         struct unix_sock *u = unix_sk(sk);
2067 
2068         if (u->addr) {
2069                 msg->msg_namelen = u->addr->len;
2070                 memcpy(msg->msg_name, u->addr->name, u->addr->len);
2071         }
2072 }
2073 
/*
 * recvmsg() for SOCK_DGRAM (and, via unix_seqpacket_recvmsg, SEQPACKET)
 * AF_UNIX sockets: dequeue (or peek) one datagram, copy its payload,
 * address, timestamps, credentials and any passed fds to the caller.
 *
 * Returns bytes copied (or full datagram length with MSG_TRUNC), or a
 * negative errno.
 */
static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			      size_t size, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* A datagram left the queue: writers blocked on our full receive
	 * queue may make progress now.
	 */
	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	/* TOMOYO (ccs) hook: the security policy may veto delivery. */
	if (ccs_socket_post_recvmsg_permission(sk, skb, flags)) {
		err = -EAGAIN; /* Hope less harmful than -EPERM. */
		goto out_unlock;
	}
	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	/* Clamp to the remaining datagram; flag truncation otherwise. */
	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	memset(&scm, 0, sizeof(scm));

	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(&scm, skb);

	if (!(flags & MSG_PEEK)) {
		/* Real read: hand over any passed fds and rewind the
		 * sticky peek offset past the consumed datagram.
		 */
		if (UNIXCB(skb).fp)
			unix_detach_fds(&scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!

		*/

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	/* MSG_TRUNC reports the real datagram length, not bytes copied. */
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, &scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
2173 
2174 /*
2175  *      Sleep until more data has arrived. But check for races..
2176  */
/*
 * Block until the receive queue changes relative to the (@last,
 * @last_len) snapshot taken by the caller, or until error, shutdown,
 * signal or timeout.  Returns the remaining timeout in jiffies (0 on
 * timeout).
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last, unsigned int last_len)
{
	struct sk_buff *tail;
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		/* New data shows up either as a new tail skb or as the
		 * same tail having grown (stream appends in place).
		 */
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail != last ||
		    (tail && tail->len != last_len) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		/* Flag set while asleep so async writers know a reader
		 * is waiting; dropped again after we wake.
		 */
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = freezable_schedule_timeout(timeo);
		unix_state_lock(sk);

		if (sock_flag(sk, SOCK_DEAD))
			break;

		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
2212 
2213 static unsigned int unix_skb_len(const struct sk_buff *skb)
2214 {
2215         return skb->len - UNIXCB(skb).consumed;
2216 }
2217 
/*
 * Per-call state shared by the generic stream read loop and its two
 * front ends (recvmsg and splice_read).
 */
struct unix_stream_read_state {
	/* Copies up to chunk bytes of skb starting at skip into the
	 * destination (msg or pipe); returns bytes consumed or <0 error.
	 */
	int (*recv_actor)(struct sk_buff *, int, int,
			  struct unix_stream_read_state *);
	struct socket *socket;		/* socket being read from */
	struct msghdr *msg;		/* destination for recvmsg path, else NULL */
	struct pipe_inode_info *pipe;	/* destination for splice path, else NULL */
	size_t size;			/* total bytes requested */
	int flags;			/* MSG_* flags */
	unsigned int splice_flags;	/* SPLICE_F_* flags (splice path only) */
};
2228 
/*
 * Core receive loop shared by unix_stream_recvmsg() and
 * unix_stream_splice_read().  Walks the receive queue, feeding each
 * skb's unconsumed bytes to state->recv_actor, gluing consecutive skbs
 * together only while they come from the same writer (same scm).
 *
 * Returns bytes copied, or a negative errno if nothing was copied.
 */
static int unix_stream_read_generic(struct unix_stream_read_state *state)
{
	struct scm_cookie scm;
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int copied = 0;
	int flags = state->flags;
	int noblock = flags & MSG_DONTWAIT;
	bool check_creds = false;	/* set once creds captured from first skb */
	int target;
	int err = 0;
	long timeo;
	int skip;
	size_t size = state->size;
	unsigned int last_len;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	/* target = minimum bytes before returning (SO_RCVLOWAT/MSG_WAITALL) */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	memset(&scm, 0, sizeof(scm));

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	if (flags & MSG_PEEK)
		skip = sk_peek_offset(sk, flags);
	else
		skip = 0;

	do {
		int chunk;
		bool drop_skb;
		struct sk_buff *skb, *last;

		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		/* Snapshot (last, last_len) so unix_stream_data_wait() can
		 * detect queue growth while we slept.
		 */
		last = skb = skb_peek(&sk->sk_receive_queue);
		last_len = last ? last->len : 0;
again:
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			/* Drop readlock while sleeping so writers and other
			 * readers can make progress.
			 */
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo, last,
						      last_len);

			if (signal_pending(current) ||
			    mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
unlock:
			unix_state_unlock(sk);
			break;
		}

		/* Advance past fully skipped skbs (sticky peek offset). */
		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			last_len = skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (!unix_skb_scm_eq(skb, &scm))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			unix_set_secdata(&scm, skb);
			check_creds = true;
		}

		/* Copy address just once */
		if (state->msg && state->msg->msg_name) {
			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
					 state->msg->msg_name);
			unix_copy_addr(state->msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		/* Hold an extra ref: the actor may sleep and a concurrent
		 * reader may unlink this skb meanwhile.
		 */
		skb_get(skb);
		chunk = state->recv_actor(skb, skip, chunk, state);
		drop_skb = !unix_skb_len(skb);
		/* skb is only safe to use if !drop_skb */
		consume_skb(skb);
		if (chunk < 0) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		if (drop_skb) {
			/* the skb was touched by a concurrent reader;
			 * we should not expect anything from this skb
			 * anymore and assume it invalid - we can be
			 * sure it was dropped from the socket queue
			 *
			 * let's report a short read
			 */
			err = 0;
			break;
		}

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(&scm, skb);

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			/* Stop at a message carrying fds: they must be
			 * delivered with exactly this read.
			 */
			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				scm.fp = scm_fp_dup(UNIXCB(skb).fp);

			sk_peek_offset_fwd(sk, chunk);

			if (UNIXCB(skb).fp)
				break;

			skip = 0;
			last = skb;
			last_len = skb->len;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	if (state->msg)
		scm_recv(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}
2429 
2430 static int unix_stream_read_actor(struct sk_buff *skb,
2431                                   int skip, int chunk,
2432                                   struct unix_stream_read_state *state)
2433 {
2434         int ret;
2435 
2436         ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2437                                     state->msg, chunk);
2438         return ret ?: chunk;
2439 }
2440 
2441 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2442                                size_t size, int flags)
2443 {
2444         struct unix_stream_read_state state = {
2445                 .recv_actor = unix_stream_read_actor,
2446                 .socket = sock,
2447                 .msg = msg,
2448                 .size = size,
2449                 .flags = flags
2450         };
2451 
2452         return unix_stream_read_generic(&state);
2453 }
2454 
2455 static ssize_t skb_unix_socket_splice(struct sock *sk,
2456                                       struct pipe_inode_info *pipe,
2457                                       struct splice_pipe_desc *spd)
2458 {
2459         int ret;
2460         struct unix_sock *u = unix_sk(sk);
2461 
2462         mutex_unlock(&u->readlock);
2463         ret = splice_to_pipe(pipe, spd);
2464         mutex_lock(&u->readlock);
2465 
2466         return ret;
2467 }
2468 
2469 static int unix_stream_splice_actor(struct sk_buff *skb,
2470                                     int skip, int chunk,
2471                                     struct unix_stream_read_state *state)
2472 {
2473         return skb_splice_bits(skb, state->socket->sk,
2474                                UNIXCB(skb).consumed + skip,
2475                                state->pipe, chunk, state->splice_flags,
2476                                skb_unix_socket_splice);
2477 }
2478 
2479 static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2480                                        struct pipe_inode_info *pipe,
2481                                        size_t size, unsigned int flags)
2482 {
2483         struct unix_stream_read_state state = {
2484                 .recv_actor = unix_stream_splice_actor,
2485                 .socket = sock,
2486                 .pipe = pipe,
2487                 .size = size,
2488                 .splice_flags = flags,
2489         };
2490 
2491         if (unlikely(*ppos))
2492                 return -ESPIPE;
2493 
2494         if (sock->file->f_flags & O_NONBLOCK ||
2495             flags & SPLICE_F_NONBLOCK)
2496                 state.flags = MSG_DONTWAIT;
2497 
2498         return unix_stream_read_generic(&state);
2499 }
2500 
/*
 * shutdown() for AF_UNIX sockets: record the shutdown mode on this
 * socket and, for connection-oriented types, mirror the complementary
 * mode onto the peer and wake it.
 */
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	/* Pin the peer so it cannot go away after we drop our lock. */
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		/* Our read shutdown is the peer's write shutdown and
		 * vice versa.
		 */
		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		/* Full shutdown reads as hangup; read shutdown as data
		 * available (EOF) to pollers on the peer.
		 */
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
2546 
2547 long unix_inq_len(struct sock *sk)
2548 {
2549         struct sk_buff *skb;
2550         long amount = 0;
2551 
2552         if (sk->sk_state == TCP_LISTEN)
2553                 return -EINVAL;
2554 
2555         spin_lock(&sk->sk_receive_queue.lock);
2556         if (sk->sk_type == SOCK_STREAM ||
2557             sk->sk_type == SOCK_SEQPACKET) {
2558                 skb_queue_walk(&sk->sk_receive_queue, skb)
2559                         amount += unix_skb_len(skb);
2560         } else {
2561                 skb = skb_peek(&sk->sk_receive_queue);
2562                 if (skb)
2563                         amount = skb->len;
2564         }
2565         spin_unlock(&sk->sk_receive_queue.lock);
2566 
2567         return amount;
2568 }
2569 EXPORT_SYMBOL_GPL(unix_inq_len);
2570 
/*
 * Bytes not yet consumed by the peer (SIOCOUTQ): the skb memory still
 * charged to this socket's write allowance.
 */
long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
2576 
2577 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2578 {
2579         struct sock *sk = sock->sk;
2580         long amount = 0;
2581         int err;
2582 
2583         switch (cmd) {
2584         case SIOCOUTQ:
2585                 amount = unix_outq_len(sk);
2586                 err = put_user(amount, (int __user *)arg);
2587                 break;
2588         case SIOCINQ:
2589                 amount = unix_inq_len(sk);
2590                 if (amount < 0)
2591                         err = amount;
2592                 else
2593                         err = put_user(amount, (int __user *)arg);
2594                 break;
2595         default:
2596                 err = -ENOIOCTLCMD;
2597                 break;
2598         }
2599         return err;
2600 }
2601 
2602 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2603 {
2604         struct sock *sk = sock->sk;
2605         unsigned int mask;
2606 
2607         sock_poll_wait(file, sk_sleep(sk), wait);
2608         mask = 0;
2609 
2610         /* exceptional events? */
2611         if (sk->sk_err)
2612                 mask |= POLLERR;
2613         if (sk->sk_shutdown == SHUTDOWN_MASK)
2614                 mask |= POLLHUP;
2615         if (sk->sk_shutdown & RCV_SHUTDOWN)
2616                 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2617 
2618         /* readable? */
2619         if (!skb_queue_empty(&sk->sk_receive_queue))
2620                 mask |= POLLIN | POLLRDNORM;
2621 
2622         /* Connection-based need to check for termination and startup */
2623         if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2624             sk->sk_state == TCP_CLOSE)
2625                 mask |= POLLHUP;
2626 
2627         /*
2628          * we set writable also when the other side has shut down the
2629          * connection. This prevents stuck sockets.
2630          */
2631         if (unix_writable(sk))
2632                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2633 
2634         return mask;
2635 }
2636 
/*
 * poll() for datagram (and connected seqpacket-style datagram) sockets.
 * Unlike unix_poll(), writability here depends on the *peer's* receive
 * queue: a connected sender whose peer is full registers on the peer's
 * wake list (unix_dgram_peer_wake_me) so it is woken when space appears.
 */
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		/*
		 * A connected peer (that has not connected back to us) with
		 * a full receive queue makes us non-writable; register for
		 * its wakeup under our own state lock.
		 */
		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		/* tell sendmsg paths to raise SIGIO/POLL_OUT when space frees */
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
2693 
2694 #ifdef CONFIG_PROC_FS
2695 
/*
 * /proc/net/unix iteration encodes its position as <bucket, offset>
 * packed into a single loff_t: the high bits select the hash bucket,
 * the low BUCKET_SPACE bits hold the 1-based offset within that
 * bucket's chain (offset 0 is reserved for the SEQ_START_TOKEN row).
 */
#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
2701 
2702 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2703 {
2704         unsigned long offset = get_offset(*pos);
2705         unsigned long bucket = get_bucket(*pos);
2706         struct sock *sk;
2707         unsigned long count = 0;
2708 
2709         for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2710                 if (sock_net(sk) != seq_file_net(seq))
2711                         continue;
2712                 if (++count == offset)
2713                         break;
2714         }
2715 
2716         return sk;
2717 }
2718 
/*
 * Advance to the next socket in table order, moving on to later hash
 * buckets as each chain is exhausted.  @sk is the current position
 * (SEQ_START_TOKEN on the first call, making the while loop a no-op);
 * @pos encodes <bucket, offset>.  Returns NULL when the whole table has
 * been walked.  Note the goto deliberately jumps into the do-while body.
 */
static struct sock *unix_next_socket(struct seq_file *seq,
				     struct sock *sk,
				     loff_t *pos)
{
	unsigned long bucket;

	/* First, continue along the current bucket's chain. */
	while (sk > (struct sock *)SEQ_START_TOKEN) {
		sk = sk_next(sk);
		if (!sk)
			goto next_bucket;	/* chain exhausted */
		if (sock_net(sk) == seq_file_net(seq))
			return sk;
	}

	/* Then scan subsequent buckets until one yields a socket. */
	do {
		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

next_bucket:
		/* restart at offset 1: offset 0 is the header token */
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < ARRAY_SIZE(unix_socket_table));

	return NULL;
}
2745 
/*
 * seq_file ->start: take unix_table_lock for the duration of the walk
 * (released in unix_seq_stop) and return the first position.
 */
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	/* position 0 means "before the first entry": emit the header */
	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}
2759 
2760 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2761 {
2762         ++*pos;
2763         return unix_next_socket(seq, v, pos);
2764 }
2765 
/* seq_file ->stop: drop the lock taken in unix_seq_start(). */
static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}
2771 
/*
 * seq_file ->show: emit one /proc/net/unix row — the column header for
 * SEQ_START_TOKEN, otherwise the socket's refcount, flags, type, state,
 * inode and (if bound) its path, with abstract names prefixed by '@'.
 */
static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		/* "Protocol" column is always 0 for AF_UNIX */
		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			/* name length minus the leading sun_family field */
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;	/* pathname: drop trailing NUL */
			else {
				/* abstract name: leading NUL shown as '@' */
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
2815 
/* seq_file callbacks backing /proc/net/unix */
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};
2822 
/* open() for /proc/net/unix: per-netns seq_file with net-aware private data. */
static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct seq_net_private));
}
2828 
/* file_operations for /proc/net/unix (standard seq_file plumbing) */
static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
2836 
2837 #endif
2838 
/* socket(PF_UNIX, ...) dispatches to unix_create() via this table */
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
2844 
2845 
2846 static int __net_init unix_net_init(struct net *net)
2847 {
2848         int error = -ENOMEM;
2849 
2850         net->unx.sysctl_max_dgram_qlen = 10;
2851         if (unix_sysctl_register(net))
2852                 goto out;
2853 
2854 #ifdef CONFIG_PROC_FS
2855         if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2856                 unix_sysctl_unregister(net);
2857                 goto out;
2858         }
2859 #endif
2860         error = 0;
2861 out:
2862         return error;
2863 }
2864 
/* Per-network-namespace teardown: mirror of unix_net_init(). */
static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}
2870 
/* init/exit hooks run for every network namespace */
static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
2875 
2876 static int __init af_unix_init(void)
2877 {
2878         int rc = -1;
2879 
2880         BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2881 
2882         rc = proto_register(&unix_proto, 1);
2883         if (rc != 0) {
2884                 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2885                 goto out;
2886         }
2887 
2888         sock_register(&unix_family_ops);
2889         register_pernet_subsys(&unix_net_ops);
2890 out:
2891         return rc;
2892 }
2893 
/* Module unload: unwind the registrations made in af_unix_init(). */
static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}
2900 
/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
/* lets request_module() resolve "net-pf-1" to this module */
MODULE_ALIAS_NETPROTO(PF_UNIX);
2910 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp