/*
 * NET4:        Implementation of BSD Unix domain sockets.
 *
 * Authors:     Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *              Linus Torvalds          :       Assorted bug cures.
 *              Niibe Yutaka            :       async I/O support.
 *              Carsten Paeth           :       PF_UNIX check, address fixes.
 *              Alan Cox                :       Limit size of allocated blocks.
 *              Alan Cox                :       Fixed the stupid socketpair bug.
 *              Alan Cox                :       BSD compatibility fine tuning.
 *              Alan Cox                :       Fixed a bug in connect when interrupted.
 *              Alan Cox                :       Sorted out a proper draft version of
 *                                              file descriptor passing hacked up from
 *                                              Mike Shaver's work.
 *              Marty Leisner           :       Fixes to fd passing.
 *              Nick Nevin              :       recvmsg bugfix.
 *              Alan Cox                :       Started proper garbage collector.
 *              Heiko EiBfeldt          :       Missing verify_area check.
 *              Alan Cox                :       Started POSIXisms.
 *              Andreas Schwab          :       Replace inode by dentry for proper
 *                                              reference counting.
 *              Kirk Petersen           :       Made this a module.
 *              Christoph Rohland       :       Elegant non-blocking accept/connect
 *                                              algorithm. Lots of bug fixes.
 *              Alexey Kuznetsov        :       Repaired (I hope) bugs introduced
 *                                              by the above two patches.
 *              Andrea Arcangeli        :       If possible we block in connect(2)
 *                                              if the max backlog of the listen socket
 *                                              has been reached. This won't break
 *                                              old apps and it will avoid huge amounts
 *                                              of socks hashed (this is for unix_gc()
 *                                              performance reasons).
 *                                              Security fix that limits the max
 *                                              number of socks to 2*max_files and
 *                                              the number of skbs queueable in the
 *                                              dgram receiver.
 *              Artur Skawina           :       Hash function optimizations.
 *              Alexey Kuznetsov        :       Full scale SMP. Lots of bugs are introduced 8)
 *              Malcolm Beattie         :       Set peercred for socketpair.
 *              Michal Ostrowski        :       Module initialization cleanup.
 *              Arnaldo C. Melo         :       Remove MOD_{INC,DEC}_USE_COUNT,
 *                                              the core infrastructure is doing that
 *                                              for all net proto families now (2.5.69+).
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *      [TO FIX]
 *      ECONNREFUSED is not returned from one end of a connected() socket to the
 *              other the moment one end closes.
 *      fstat() doesn't return st_dev=0, give the blksize as high water mark,
 *              or a fake inode identifier (nor the BSD first socket fstat twice bug).
 *      [NOT TO FIX]
 *      accept() returns a path name even if the connecting socket has closed
 *              in the meantime (BSD loses the path and gives up).
 *      accept() returns a 0 length path for an unbound connector. BSD returns 16
 *              and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *      socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *      BSD af_unix apparently has connect forgetting to block properly.
 *              (need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *      Bug fixes and improvements.
 *              - client shutdown killed server socket.
 *              - removed all useless cli/sti pairs.
 *
 *      Semantic changes/extensions.
 *              - generic control message passing.
 *              - SCM_CREDENTIALS control message.
 *              - "Abstract" (not FS based) socket bindings.
 *                Abstract names are sequences of bytes (not zero terminated)
 *                started by 0, so that this name space does not intersect
 *                with BSD names.
 */

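/*
 * Illustrative only (userspace view, not part of this file): an abstract
 * address is distinguished by a leading zero byte in sun_path, e.g.
 *
 *      struct sockaddr_un a = { .sun_family = AF_UNIX };
 *      memcpy(a.sun_path, "\0example", 8);
 *      bind(fd, (struct sockaddr *)&a, offsetof(struct sockaddr_un, sun_path) + 8);
 *
 * Such names live purely in the socket hash table below and never touch the
 * filesystem, unlike path-based bindings.
 */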
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;

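/*
 * Sockets that are not (yet) bound to a name live in the upper half of
 * unix_socket_table, hashed by the kernel address of the sock itself;
 * the folding below just spreads that pointer value across the buckets.
 */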
static struct hlist_head *unix_sockets_unbound(void *addr)
{
        unsigned long hash = (unsigned long)addr;

        hash ^= hash >> 16;
        hash ^= hash >> 8;
        hash %= UNIX_HASH_SIZE;
        return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)       (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

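/* Fold a checksum of a socket name down to an index in [0, UNIX_HASH_SIZE). */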
static inline unsigned int unix_hash_fold(__wsum n)
{
        unsigned int hash = (__force unsigned int)csum_fold(n);

        hash ^= hash>>8;
        return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
        return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
        struct sock *peer;

        unix_state_lock(s);
        peer = unix_peer(s);
        if (peer)
                sock_hold(peer);
        unix_state_unlock(s);
        return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
        if (atomic_dec_and_test(&addr->refcnt))
                kfree(addr);
}

/*
 *      Check unix socket name:
 *              - it should not be zero length.
 *              - if it starts with a non-zero byte, it should be NUL terminated (FS object)
 *              - if it starts with a zero byte, it is an abstract name.
 */

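/*
 * Returns the effective address length on success or a negative errno.
 * For filesystem names the path is NUL terminated in place; for abstract
 * names *hashp is set to a fold of the name's checksum.
 */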
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
        if (len <= sizeof(short) || len > sizeof(*sunaddr))
                return -EINVAL;
        if (!sunaddr || sunaddr->sun_family != AF_UNIX)
                return -EINVAL;
        if (sunaddr->sun_path[0]) {
                /*
                 * This may look like an off by one error but it is a bit more
                 * subtle. 108 is the longest valid AF_UNIX path for a binding.
                 * sun_path[108] doesn't as such exist.  However in kernel space
                 * we are guaranteed that it is a valid memory location in our
                 * kernel address buffer.
                 */
                ((char *)sunaddr)[len] = 0;
                len = strlen(sunaddr->sun_path)+1+sizeof(short);
                return len;
        }

        *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
        return len;
}

static void __unix_remove_socket(struct sock *sk)
{
        sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        WARN_ON(!sk_unhashed(sk));
        sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_remove_socket(sk);
        spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_insert_socket(list, sk);
        spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
                                              struct sockaddr_un *sunname,
                                              int len, int type, unsigned int hash)
{
        struct sock *s;

        sk_for_each(s, &unix_socket_table[hash ^ type]) {
                struct unix_sock *u = unix_sk(s);

                if (!net_eq(sock_net(s), net))
                        continue;

                if (u->addr->len == len &&
                    !memcmp(u->addr->name, sunname, len))
                        goto found;
        }
        s = NULL;
found:
        return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
                                                   struct sockaddr_un *sunname,
                                                   int len, int type,
                                                   unsigned int hash)
{
        struct sock *s;

        spin_lock(&unix_table_lock);
        s = __unix_find_socket_byname(net, sunname, len, type, hash);
        if (s)
                sock_hold(s);
        spin_unlock(&unix_table_lock);
        return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
        struct sock *s;

        spin_lock(&unix_table_lock);
        sk_for_each(s,
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;

                if (dentry && d_backing_inode(dentry) == i) {
                        sock_hold(s);
                        goto found;
                }
        }
        s = NULL;
found:
        spin_unlock(&unix_table_lock);
        return s;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (e.g., /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and is broken when
 * the association to the server socket is dissolved or after a wake
 * up was relayed.
 */

static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
                                      void *key)
{
        struct unix_sock *u;
        wait_queue_head_t *u_sleep;

        u = container_of(q, struct unix_sock, peer_wake);

        __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
                            q);
        u->peer_wake.private = NULL;

        /* relaying can only happen while the wq still exists */
        u_sleep = sk_sleep(&u->sk);
        if (u_sleep)
                wake_up_interruptible_poll(u_sleep, key);

        return 0;
}

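/*
 * Returns 1 if sk's wait entry was newly enqueued on other's peer_wait
 * queue, 0 if it was already enqueued there (or on some other peer).
 */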
static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
        struct unix_sock *u, *u_other;
        int rc;

        u = unix_sk(sk);
        u_other = unix_sk(other);
        rc = 0;
        spin_lock(&u_other->peer_wait.lock);

        if (!u->peer_wake.private) {
                u->peer_wake.private = other;
                __add_wait_queue(&u_other->peer_wait, &u->peer_wake);

                rc = 1;
        }

        spin_unlock(&u_other->peer_wait.lock);
        return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
                                            struct sock *other)
{
        struct unix_sock *u, *u_other;

        u = unix_sk(sk);
        u_other = unix_sk(other);
        spin_lock(&u_other->peer_wait.lock);

        if (u->peer_wake.private == other) {
                __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
                u->peer_wake.private = NULL;
        }

        spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
                                                   struct sock *other)
{
        unix_dgram_peer_wake_disconnect(sk, other);
        wake_up_interruptible_poll(sk_sleep(sk),
                                   POLLOUT |
                                   POLLWRNORM |
                                   POLLWRBAND);
}

/* preconditions:
 *      - unix_peer(sk) == other
 *      - association is stable
 */
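/*
 * Returns 1 if the peer's receive queue is full and a wakeup relay has been
 * left registered (the caller should treat the socket as not writable yet);
 * returns 0 otherwise, rolling back any registration made here.
 */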
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
        int connected;

        connected = unix_dgram_peer_wake_connect(sk, other);

        if (unix_recvq_full(other))
                return 1;

        if (connected)
                unix_dgram_peer_wake_disconnect(sk, other);

        return 0;
}

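/* Writable while queued write memory uses no more than a quarter of sk_sndbuf. */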
static inline int unix_writable(struct sock *sk)
{
        return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        if (unix_writable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
                                POLLOUT | POLLWRNORM | POLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, this allows flow
 * control based only on wmem_alloc; second, a sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
        if (!skb_queue_empty(&sk->sk_receive_queue)) {
                skb_queue_purge(&sk->sk_receive_queue);
                wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

                /* If one link of a bidirectional dgram pipe is disconnected,
                 * we signal an error. Messages are lost. Do not do this
                 * when the peer was not connected to us.
                 */
                if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
                        other->sk_err = ECONNRESET;
                        other->sk_error_report(other);
                }
        }
}

static void unix_sock_destructor(struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);

        skb_queue_purge(&sk->sk_receive_queue);

        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_info("Attempt to release alive unix socket: %p\n", sk);
                return;
        }

        if (u->addr)
                unix_release_addr(u->addr);

        atomic_long_dec(&unix_nr_socks);
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
        pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
                atomic_long_read(&unix_nr_socks));
#endif
}

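/*
 * 'embrion' appears to be non-zero only when tearing down a not-yet-accepted
 * child socket still queued on a listening socket (see the flush loop below).
 */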
static void unix_release_sock(struct sock *sk, int embrion)
{
        struct unix_sock *u = unix_sk(sk);
        struct path path;
        struct sock *skpair;
        struct sk_buff *skb;
        int state;

        unix_remove_socket(sk);

        /* Clear state */
        unix_state_lock(sk);
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
        path = u->path;
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        state = sk->sk_state;
        sk->sk_state = TCP_CLOSE;
        unix_state_unlock(sk);

        wake_up_interruptible_all(&u->peer_wait);

        skpair = unix_peer(sk);

        if (skpair != NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
                        unix_state_lock(skpair);
                        /* No more writes */
                        skpair->sk_shutdown = SHUTDOWN_MASK;
                        if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
                                skpair->sk_err = ECONNRESET;
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }

                unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
                unix_peer(sk) = NULL;
        }

        /* Try to flush out this socket. Throw out buffers at least */

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (state == TCP_LISTEN)
                        unix_release_sock(skb->sk, 1);
                /* passed fds are erased in the kfree_skb hook */
                kfree_skb(skb);
        }

        if (path.dentry)
                path_put(&path);

        sock_put(sk);

        /* ---- Socket is dead now and most probably destroyed ---- */

        /*
         * Fixme: BSD difference: In BSD all sockets connected to us get
         *        ECONNRESET and we die on the spot. In Linux we behave
         *        like files and pipes do and wait for the last
         *        dereference.
         *
         * Can't we simply set sock->err?
         *
         *        What does the above comment talk about? --ANK(980817)
         */

        if (unix_tot_inflight)
                unix_gc();              /* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
        put_pid(sk->sk_peer_pid);
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        sk->sk_peer_pid = get_pid(task_tgid(current));
        sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
        put_pid(sk->sk_peer_pid);
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
        sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
        int err;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        struct pid *old_pid = NULL;

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;       /* Only stream/seqpacket sockets accept */
        err = -EINVAL;
        if (!u->addr)
                goto out;       /* No listens on an unbound socket */
        unix_state_lock(sk);
        if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (backlog > sk->sk_max_ack_backlog)
                wake_up_interruptible_all(&u->peer_wait);
        sk->sk_max_ack_backlog = backlog;
        sk->sk_state = TCP_LISTEN;
        /* set credentials so connect can copy them */
        init_peercred(sk);
        err = 0;

out_unlock:
        unix_state_unlock(sk);
        put_pid(old_pid);
out:
        return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
                               int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
                                    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
                              int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
                                  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
        struct unix_sock *u = unix_sk(sk);

        if (mutex_lock_interruptible(&u->readlock))
                return -EINTR;

        sk->sk_peek_off = val;
        mutex_unlock(&u->readlock);

        return 0;
}


static const struct proto_ops unix_stream_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_stream_connect,
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
        .poll =         unix_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_stream_sendmsg,
        .recvmsg =      unix_stream_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_dgram_connect,
        .socketpair =   unix_socketpair,
        .accept =       sock_no_accept,
        .getname =      unix_getname,
        .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_dgram_sendmsg,
        .recvmsg =      unix_dgram_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_stream_connect,
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
        .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_seqpacket_sendmsg,
        .recvmsg =      unix_seqpacket_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
};

static struct proto unix_proto = {
        .name     = "UNIX",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
        struct sock *sk = NULL;
        struct unix_sock *u;

        atomic_long_inc(&unix_nr_socks);
        if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
                goto out;

        sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
        if (!sk)
                goto out;

        sock_init_data(sock, sk);
        lockdep_set_class(&sk->sk_receive_queue.lock,
                          &af_unix_sk_receive_queue_lock_key);

        sk->sk_write_space = unix_write_space;
        sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
        sk->sk_destruct = unix_sock_destructor;
        u = unix_sk(sk);
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        spin_lock_init(&u->lock);
        atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
        if (sk == NULL)
                atomic_long_dec(&unix_nr_socks);
        else {
                local_bh_disable();
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
                local_bh_enable();
        }
        return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
{
        if (protocol && protocol != PF_UNIX)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &unix_stream_ops;
                break;
                /*
                 *      Believe it or not BSD has AF_UNIX, SOCK_RAW though
                 *      nothing uses it.
                 */
        case SOCK_RAW:
                sock->type = SOCK_DGRAM;
        case SOCK_DGRAM:
                sock->ops = &unix_dgram_ops;
                break;
        case SOCK_SEQPACKET:
                sock->ops = &unix_seqpacket_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (!sk)
                return 0;

        unix_release_sock(sk, 0);
        sock->sk = NULL;

        return 0;
}

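/*
 * Autobind picks an abstract name of the form "\0" + five hex digits
 * (the kzalloc leaves sun_path[0] zero) and retries until an unused
 * name is found or the namespace is exhausted.
 */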
static int unix_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        static u32 ordernum = 1;
        struct unix_address *addr;
        int err;
        unsigned int retries = 0;

        err = mutex_lock_interruptible(&u->readlock);
        if (err)
                return err;

        err = 0;
        if (u->addr)
                goto out;

        err = -ENOMEM;
        addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
        if (!addr)
                goto out;

        addr->name->sun_family = AF_UNIX;
        atomic_set(&addr->refcnt, 1);

retry:
        addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
        addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

        spin_lock(&unix_table_lock);
        ordernum = (ordernum+1)&0xFFFFF;

        if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
                                      addr->hash)) {
                spin_unlock(&unix_table_lock);
                /*
                 * __unix_find_socket_byname() may take a long time if many
                 * names are already in use.
                 */
                cond_resched();
                /* Give up if all names seem to be in use. */
                if (retries++ == 0xFFFFF) {
                        err = -ENOSPC;
                        kfree(addr);
                        goto out;
                }
                goto retry;
        }
        addr->hash ^= sk->sk_type;

        __unix_remove_socket(sk);
        u->addr = addr;
        __unix_insert_socket(&unix_socket_table[addr->hash], sk);
        spin_unlock(&unix_table_lock);
        err = 0;

out:    mutex_unlock(&u->readlock);
        return err;
}

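/*
 * Resolve a sockaddr_un to its bound peer sock: filesystem names go through
 * a path lookup plus inode match, abstract names through the name hash table.
 * On success a held sock is returned; on failure NULL is returned and *error
 * is set.
 */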
static struct sock *unix_find_other(struct net *net,
                                    struct sockaddr_un *sunname, int len,
                                    int type, unsigned int hash, int *error)
{
        struct sock *u;
        struct path path;
        int err = 0;

        if (sunname->sun_path[0]) {
                struct inode *inode;
                err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
                if (err)
                        goto fail;
                inode = d_backing_inode(path.dentry);
                err = inode_permission(inode, MAY_WRITE);
                if (err)
                        goto put_fail;

                err = -ECONNREFUSED;
                if (!S_ISSOCK(inode->i_mode))
                        goto put_fail;
                u = unix_find_socket_byinode(inode);
                if (!u)
                        goto put_fail;

                if (u->sk_type == type)
                        touch_atime(&path);

                path_put(&path);

                err = -EPROTOTYPE;
                if (u->sk_type != type) {
                        sock_put(u);
                        goto fail;
                }
        } else {
                err = -ECONNREFUSED;
                u = unix_find_socket_byname(net, sunname, len, type, hash);
                if (u) {
                        struct dentry *dentry;
                        dentry = unix_sk(u)->path.dentry;
                        if (dentry)
                                touch_atime(&unix_sk(u)->path);
                } else
                        goto fail;
        }
        return u;

put_fail:
        path_put(&path);
fail:
        *error = err;
        return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
        struct dentry *dentry;
        struct path path;
        int err = 0;
        /*
         * Get the parent directory, calculate the hash for last
         * component.
         */
        dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
        err = PTR_ERR(dentry);
        if (IS_ERR(dentry))
                return err;

        /*
         * All right, let's create it.
         */
        err = security_path_mknod(&path, dentry, mode, 0);
        if (!err) {
                err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
                if (!err) {
                        res->mnt = mntget(path.mnt);
                        res->dentry = dget(dentry);
                }
        }
        done_path_create(&path, dentry);
        return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        char *sun_path = sunaddr->sun_path;
        int err;
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;

        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
                goto out;

        if (addr_len == sizeof(short)) {
                err = unix_autobind(sock);
                goto out;
        }

        err = unix_mkname(sunaddr, addr_len, &hash);
        if (err < 0)
                goto out;
        addr_len = err;

        err = mutex_lock_interruptible(&u->readlock);
        if (err)
                goto out;

        err = -EINVAL;
        if (u->addr)
                goto out_up;

        err = -ENOMEM;
        addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
        if (!addr)
                goto out_up;

        memcpy(addr->name, sunaddr, addr_len);
        addr->len = addr_len;
        addr->hash = hash ^ sk->sk_type;
        atomic_set(&addr->refcnt, 1);

        if (sun_path[0]) {
                struct path path;
                umode_t mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
                err = unix_mknod(sun_path, mode, &path);
                if (err) {
                        if (err == -EEXIST)
                                err = -EADDRINUSE;
                        unix_release_addr(addr);
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
                hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
                spin_lock(&unix_table_lock);
                u->path = path;
                list = &unix_socket_table[hash];
        } else {
                spin_lock(&unix_table_lock);
                err = -EADDRINUSE;
                if (__unix_find_socket_byname(net, sunaddr, addr_len,
                                              sk->sk_type, hash)) {
                        unix_release_addr(addr);
                        goto out_unlock;
                }

                list = &unix_socket_table[addr->hash];
        }

        err = 0;
        __unix_remove_socket(sk);
        u->addr = addr;
        __unix_insert_socket(list, sk);

out_unlock:
        spin_unlock(&unix_table_lock);
out_up:
        mutex_unlock(&u->readlock);
out:
        return err;
}

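/*
 * Lock two sockets' state locks in a consistent (address) order so that
 * concurrent double-locks cannot deadlock; an identical or NULL sk2
 * degenerates to locking sk1 alone.
 */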
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_lock(sk1);
                return;
        }
        if (sk1 < sk2) {
                unix_state_lock(sk1);
                unix_state_lock_nested(sk2);
        } else {
                unix_state_lock(sk2);
                unix_state_lock_nested(sk1);
        }
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_unlock(sk1);
                return;
        }
        unix_state_unlock(sk1);
        unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
                              int alen, int flags)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
        struct sock *other;
        unsigned int hash;
        int err;

        if (addr->sa_family != AF_UNSPEC) {
                err = unix_mkname(sunaddr, alen, &hash);
                if (err < 0)
                        goto out;
                alen = err;

                if (test_bit(SOCK_PASSCRED, &sock->flags) &&
                    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
                        goto out;

restart:
                other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
                if (!other)
                        goto out;

                unix_state_double_lock(sk, other);

                /* Apparently VFS overslept socket death. Retry. */
                if (sock_flag(other, SOCK_DEAD)) {
                        unix_state_double_unlock(sk, other);
                        sock_put(other);
                        goto restart;
                }

                err = -EPERM;
                if (!unix_may_send(sk, other))
                        goto out_unlock;

                err = security_unix_may_send(sk->sk_socket, other->sk_socket);
                if (err)
                        goto out_unlock;

        } else {
                /*
                 *      1003.1g breaking connected state with AF_UNSPEC
                 */
                other = NULL;
                unix_state_double_lock(sk, other);
        }

        /*
         * If it was connected, reconnect.
         */
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);
                unix_peer(sk) = other;
                unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

                unix_state_double_unlock(sk, other);

                if (other != old_peer)
                        unix_dgram_disconnected(sk, old_peer);
                sock_put(old_peer);
        } else {
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);
        }
        return 0;

out_unlock:
        unix_state_double_unlock(sk, other);
        sock_put(other);
out:
        return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
        struct unix_sock *u = unix_sk(other);
        int sched;
        DEFINE_WAIT(wait);

        prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

        sched = !sock_flag(other, SOCK_DEAD) &&
                !(other->sk_shutdown & RCV_SHUTDOWN) &&
                unix_recvq_full(other);

        unix_state_unlock(other);

        if (sched)
                timeo = schedule_timeout(timeo);

        finish_wait(&u->peer_wait, &wait);
        return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                               int addr_len, int flags)
{
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk), *newu, *otheru;
        struct sock *newsk = NULL;
        struct sock *other = NULL;
        struct sk_buff *skb = NULL;
        unsigned int hash;
        int st;
        int err;
        long timeo;

        err = unix_mkname(sunaddr, addr_len, &hash);
        if (err < 0)
                goto out;
        addr_len = err;

        if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
            (err = unix_autobind(sock)) != 0)
                goto out;

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        /* First of all allocate resources.
           If we do this after the state is locked,
           we will have to recheck everything again in any case.
         */

        err = -ENOMEM;

        /* create new sock for complete connection */
        newsk = unix_create1(sock_net(sk), NULL);
        if (newsk == NULL)
                goto out;

        /* Allocate skb for sending to listening sock */
        skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
        if (skb == NULL)
                goto out;

restart:
        /* Find listening sock. */
        other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
        if (!other)
                goto out;

        /* Latch state of peer */
        unix_state_lock(other);

        /* Apparently VFS overslept socket death. Retry. */
        if (sock_flag(other, SOCK_DEAD)) {
                unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }

        err = -ECONNREFUSED;
        if (other->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;

        if (unix_recvq_full(other)) {
                err = -EAGAIN;
                if (!timeo)
                        goto out_unlock;

                timeo = unix_wait_for_peer(other, timeo);

                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        goto out;
                sock_put(other);
                goto restart;
        }

        /* Latch our state.

           This is a tricky place. We need to grab our state lock and cannot
           drop the lock on the peer. It is dangerous because a deadlock is
           possible. The connect-to-self case and simultaneous connect
           attempts are eliminated by checking the socket state: other is
           TCP_LISTEN, and if sk is TCP_LISTEN we check this before
           attempting to grab the lock.

           Well, and we have to recheck the state after the socket is locked.
         */
        st = sk->sk_state;

        switch (st) {
        case TCP_CLOSE:
                /* This is ok... continue with connect */
                break;
        case TCP_ESTABLISHED:
                /* Socket is already connected */
                err = -EISCONN;
                goto out_unlock;
        default:
                err = -EINVAL;
                goto out_unlock;
        }

        unix_state_lock_nested(sk);

        if (sk->sk_state != st) {
                unix_state_unlock(sk);
                unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }

        err = security_unix_stream_connect(sk, other, newsk);
        if (err) {
                unix_state_unlock(sk);
                goto out_unlock;
        }

        /* The way is open! Quickly set all the necessary fields... */

        sock_hold(sk);
        unix_peer(newsk) = sk;
        newsk->sk_state = TCP_ESTABLISHED;
        newsk->sk_type = sk->sk_type;
        init_peercred(newsk);
        newu = unix_sk(newsk);
        RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
        otheru = unix_sk(other);

        /* copy address information from listening to new sock */
        if (otheru->addr) {
                atomic_inc(&otheru->addr->refcnt);
                newu->addr = otheru->addr;
        }
        if (otheru->path.dentry) {
                path_get(&otheru->path);
                newu->path = otheru->path;
        }

        /* Set credentials */
        copy_peercred(sk, other);

        sock->state = SS_CONNECTED;
        sk->sk_state = TCP_ESTABLISHED;
        sock_hold(newsk);

        smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
        unix_peer(sk) = newsk;

        unix_state_unlock(sk);

        /* take ten and send info to the listening sock */
        spin_lock(&other->sk_receive_queue.lock);
        __skb_queue_tail(&other->sk_receive_queue, skb);
        spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
        other->sk_data_ready(other);
        sock_put(other);
        return 0;

out_unlock:
        if (other)
                unix_state_unlock(other);

out:
        kfree_skb(skb);
        if (newsk)
                unix_release_sock(newsk, 0);
        if (other)
                sock_put(other);
        return err;
}

1349static int unix_socketpair(struct socket *socka, struct socket *sockb)
1350{
Jianjun Konge27dfce2008-11-01 21:38:31 -07001351 struct sock *ska = socka->sk, *skb = sockb->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
1353 /* Join our sockets back to back */
1354 sock_hold(ska);
1355 sock_hold(skb);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001356 unix_peer(ska) = skb;
1357 unix_peer(skb) = ska;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001358 init_peercred(ska);
1359 init_peercred(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
1361 if (ska->sk_type != SOCK_DGRAM) {
1362 ska->sk_state = TCP_ESTABLISHED;
1363 skb->sk_state = TCP_ESTABLISHED;
1364 socka->state = SS_CONNECTED;
1365 sockb->state = SS_CONNECTED;
1366 }
1367 return 0;
1368}
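/*
 * Illustration (userspace sketch, not part of this file): unix_socketpair()
 * above is what backs socketpair(2) for AF_UNIX.  A minimal example; error
 * handling is omitted for brevity:
 *
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fds[2];
 *		char buf[8] = "";
 *
 *		if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
 *			return 1;
 *		// Both ends are already connected peers; no bind/listen/accept.
 *		write(fds[0], "ping", 4);
 *		read(fds[1], buf, sizeof(buf) - 1);
 *		printf("%s\n", buf);
 *		close(fds[0]);
 *		close(fds[1]);
 *		return 0;
 *	}
 */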
1369
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001370static void unix_sock_inherit_flags(const struct socket *old,
1371 struct socket *new)
1372{
1373 if (test_bit(SOCK_PASSCRED, &old->flags))
1374 set_bit(SOCK_PASSCRED, &new->flags);
1375 if (test_bit(SOCK_PASSSEC, &old->flags))
1376 set_bit(SOCK_PASSSEC, &new->flags);
1377}
1378
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1380{
1381 struct sock *sk = sock->sk;
1382 struct sock *tsk;
1383 struct sk_buff *skb;
1384 int err;
1385
1386 err = -EOPNOTSUPP;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001387 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 goto out;
1389
1390 err = -EINVAL;
1391 if (sk->sk_state != TCP_LISTEN)
1392 goto out;
1393
1394 /* If socket state is TCP_LISTEN it cannot change (for now...),
 1395	 * so no locks are necessary.
1396 */
1397
1398 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1399 if (!skb) {
1400 /* This means receive shutdown. */
1401 if (err == 0)
1402 err = -EINVAL;
1403 goto out;
1404 }
1405
1406 tsk = skb->sk;
1407 skb_free_datagram(sk, skb);
1408 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1409
1410 /* attach accepted sock to socket */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001411 unix_state_lock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 newsock->state = SS_CONNECTED;
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001413 unix_sock_inherit_flags(sock, newsock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 sock_graft(tsk, newsock);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001415 unix_state_unlock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 return 0;
1417
1418out:
1419 return err;
1420}
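/*
 * Illustration (userspace sketch, not part of this file): the listen/accept
 * path served by unix_accept() above.  The socket path and backlog are
 * arbitrary examples; error handling is omitted:
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_un addr = { .sun_family = AF_UNIX };
 *		int srv, conn;
 *
 *		strncpy(addr.sun_path, "/tmp/example.sock",
 *			sizeof(addr.sun_path) - 1);
 *		srv = socket(AF_UNIX, SOCK_STREAM, 0);
 *		unlink(addr.sun_path);	// a previously bound name persists on disk
 *		bind(srv, (struct sockaddr *)&addr, sizeof(addr));
 *		listen(srv, 5);
 *		// accept() dequeues one embryo socket queued by the connect path
 *		conn = accept(srv, NULL, NULL);
 *		close(conn);
 *		close(srv);
 *		return 0;
 *	}
 */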
1421
1422
1423static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1424{
1425 struct sock *sk = sock->sk;
1426 struct unix_sock *u;
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001427 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 int err = 0;
1429
1430 if (peer) {
1431 sk = unix_peer_get(sk);
1432
1433 err = -ENOTCONN;
1434 if (!sk)
1435 goto out;
1436 err = 0;
1437 } else {
1438 sock_hold(sk);
1439 }
1440
1441 u = unix_sk(sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001442 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 if (!u->addr) {
1444 sunaddr->sun_family = AF_UNIX;
1445 sunaddr->sun_path[0] = 0;
1446 *uaddr_len = sizeof(short);
1447 } else {
1448 struct unix_address *addr = u->addr;
1449
1450 *uaddr_len = addr->len;
1451 memcpy(sunaddr, addr->name, *uaddr_len);
1452 }
David S. Miller1c92b4e2007-05-31 13:24:26 -07001453 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 sock_put(sk);
1455out:
1456 return err;
1457}
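/*
 * Illustration (userspace sketch, not part of this file): unix_getname()
 * above serves getsockname(2)/getpeername(2).  "fd" is assumed to be an
 * existing AF_UNIX socket:
 *
 *	struct sockaddr_un addr;
 *	socklen_t len = sizeof(addr);
 *
 *	if (getsockname(fd, (struct sockaddr *)&addr, &len) == 0) {
 *		// For a never-bound socket only the family is returned and
 *		// len comes back as sizeof(sa_family_t); for an abstract
 *		// address sun_path starts with a '\0' byte.
 *	}
 */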
1458
1459static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1460{
1461 int i;
1462
1463 scm->fp = UNIXCB(skb).fp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 UNIXCB(skb).fp = NULL;
1465
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001466 for (i = scm->fp->count-1; i >= 0; i--)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 unix_notinflight(scm->fp->fp[i]);
1468}
1469
Eric W. Biederman7361c362010-06-13 03:34:33 +00001470static void unix_destruct_scm(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
1472 struct scm_cookie scm;
1473 memset(&scm, 0, sizeof(scm));
Eric W. Biederman7361c362010-06-13 03:34:33 +00001474 scm.pid = UNIXCB(skb).pid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001475 if (UNIXCB(skb).fp)
1476 unix_detach_fds(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478 /* Alas, it calls VFS */
 1479	/* So fscking what? fput() has been SMP-safe since last summer */
1480 scm_destroy(&scm);
1481 sock_wfree(skb);
1482}
1483
Eric Dumazet25888e32010-11-25 04:11:39 +00001484#define MAX_RECURSION_LEVEL 4
1485
Miklos Szeredi62093442008-11-09 15:23:57 +01001486static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487{
1488 int i;
Eric Dumazet25888e32010-11-25 04:11:39 +00001489 unsigned char max_level = 0;
1490 int unix_sock_count = 0;
1491
1492 for (i = scm->fp->count - 1; i >= 0; i--) {
1493 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1494
1495 if (sk) {
1496 unix_sock_count++;
1497 max_level = max(max_level,
1498 unix_sk(sk)->recursion_level);
1499 }
1500 }
1501 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1502 return -ETOOMANYREFS;
Miklos Szeredi62093442008-11-09 15:23:57 +01001503
1504 /*
1505 * Need to duplicate file references for the sake of garbage
1506 * collection. Otherwise a socket in the fps might become a
1507 * candidate for GC while the skb is not yet queued.
1508 */
1509 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1510 if (!UNIXCB(skb).fp)
1511 return -ENOMEM;
1512
Eric Dumazet25888e32010-11-25 04:11:39 +00001513 if (unix_sock_count) {
1514 for (i = scm->fp->count - 1; i >= 0; i--)
1515 unix_inflight(scm->fp->fp[i]);
1516 }
1517 return max_level;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518}
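/*
 * Illustration (userspace sketch, not part of this file): the SCM_RIGHTS
 * ancillary data consumed by unix_attach_fds()/unix_detach_fds() above.
 * send_fd() is an illustrative helper name and "sock" an assumed connected
 * AF_UNIX socket; error handling is minimal:
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	static int send_fd(int sock, int fd)
 *	{
 *		char dummy = '*';
 *		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *		union {
 *			char buf[CMSG_SPACE(sizeof(int))];
 *			struct cmsghdr align;
 *		} u;
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *		return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
 *	}
 *
 * Passing unix sockets that themselves carry unix sockets is what the
 * MAX_RECURSION_LEVEL check above bounds (-ETOOMANYREFS).
 */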
1519
David S. Millerf78a5fd2011-09-16 19:34:00 -04001520static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001521{
1522 int err = 0;
Eric Dumazet16e57262011-09-19 05:52:27 +00001523
David S. Millerf78a5fd2011-09-16 19:34:00 -04001524 UNIXCB(skb).pid = get_pid(scm->pid);
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001525 UNIXCB(skb).uid = scm->creds.uid;
1526 UNIXCB(skb).gid = scm->creds.gid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001527 UNIXCB(skb).fp = NULL;
1528 if (scm->fp && send_fds)
1529 err = unix_attach_fds(scm, skb);
1530
1531 skb->destructor = unix_destruct_scm;
1532 return err;
1533}
1534
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535/*
Eric Dumazet16e57262011-09-19 05:52:27 +00001536 * Some apps rely on write() giving SCM_CREDENTIALS.
1537 * We include credentials if source or destination socket
1538 * asserted SOCK_PASSCRED.
1539 */
1540static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1541 const struct sock *other)
1542{
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001543 if (UNIXCB(skb).pid)
Eric Dumazet16e57262011-09-19 05:52:27 +00001544 return;
1545 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
Eric W. Biederman25da0e32013-04-03 16:13:35 +00001546 !other->sk_socket ||
1547 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
Eric Dumazet16e57262011-09-19 05:52:27 +00001548 UNIXCB(skb).pid = get_pid(task_tgid(current));
David S. Miller6e0895c2013-04-22 20:32:51 -04001549 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
Eric Dumazet16e57262011-09-19 05:52:27 +00001550 }
1551}
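/*
 * Illustration (userspace sketch, not part of this file): how a receiver asks
 * for the credentials that unix_scm_to_skb()/maybe_add_creds() attach above.
 * print_peer_creds() is an illustrative helper name, "sock" an assumed AF_UNIX
 * socket with data pending; _GNU_SOURCE is needed for struct ucred on glibc:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void print_peer_creds(int sock)
 *	{
 *		char data[64], cbuf[CMSG_SPACE(sizeof(struct ucred))];
 *		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg;
 *		int on = 1;
 *
 *		setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *		if (recvmsg(sock, &msg, 0) < 0)
 *			return;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_CREDENTIALS) {
 *				struct ucred cred;
 *
 *				memcpy(&cred, CMSG_DATA(cmsg), sizeof(cred));
 *				printf("pid=%ld uid=%ld gid=%ld\n",
 *				       (long)cred.pid, (long)cred.uid,
 *				       (long)cred.gid);
 *			}
 *	}
 */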
1552
1553/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 * Send AF_UNIX data.
1555 */
1556
Ying Xue1b784142015-03-02 15:37:48 +08001557static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1558 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001561 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 struct unix_sock *u = unix_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01001563 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 struct sock *other = NULL;
 1565	int namelen = 0; /* initialized only to silence a GCC warning */
1566 int err;
Eric Dumazet95c96172012-04-15 05:58:06 +00001567 unsigned int hash;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001568 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 long timeo;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001570 struct scm_cookie scm;
Eric Dumazet25888e32010-11-25 04:11:39 +00001571 int max_level;
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001572 int data_len = 0;
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001573 int sk_locked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
dann frazier5f23b732008-11-26 15:32:27 -08001575 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001576 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 if (err < 0)
1578 return err;
1579
1580 err = -EOPNOTSUPP;
1581 if (msg->msg_flags&MSG_OOB)
1582 goto out;
1583
1584 if (msg->msg_namelen) {
1585 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1586 if (err < 0)
1587 goto out;
1588 namelen = err;
1589 } else {
1590 sunaddr = NULL;
1591 err = -ENOTCONN;
1592 other = unix_peer_get(sk);
1593 if (!other)
1594 goto out;
1595 }
1596
Joe Perchesf64f9e72009-11-29 16:55:45 -08001597 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1598 && (err = unix_autobind(sock)) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 goto out;
1600
1601 err = -EMSGSIZE;
1602 if (len > sk->sk_sndbuf - 32)
1603 goto out;
1604
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001605 if (len > SKB_MAX_ALLOC) {
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001606 data_len = min_t(size_t,
1607 len - SKB_MAX_ALLOC,
1608 MAX_SKB_FRAGS * PAGE_SIZE);
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001609 data_len = PAGE_ALIGN(data_len);
1610
1611 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1612 }
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001613
1614 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001615 msg->msg_flags & MSG_DONTWAIT, &err,
1616 PAGE_ALLOC_COSTLY_ORDER);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001617 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 goto out;
1619
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001620 err = unix_scm_to_skb(&scm, skb, true);
Eric Dumazet25888e32010-11-25 04:11:39 +00001621 if (err < 0)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001622 goto out_free;
Eric Dumazet25888e32010-11-25 04:11:39 +00001623 max_level = err + 1;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001624 unix_get_secdata(&scm, skb);
Catherine Zhang877ce7c2006-06-29 12:27:47 -07001625
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001626 skb_put(skb, len - data_len);
1627 skb->data_len = data_len;
1628 skb->len = len;
Al Viroc0371da2014-11-24 10:42:55 -05001629 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 if (err)
1631 goto out_free;
1632
1633 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1634
1635restart:
1636 if (!other) {
1637 err = -ECONNRESET;
1638 if (sunaddr == NULL)
1639 goto out_free;
1640
Denis V. Lunev097e66c2007-11-19 22:29:30 -08001641 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 hash, &err);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001643 if (other == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 goto out_free;
1645 }
1646
Alban Crequyd6ae3ba2011-01-18 06:39:15 +00001647 if (sk_filter(other, skb) < 0) {
1648 /* Toss the packet but do not return any error to the sender */
1649 err = len;
1650 goto out_free;
1651 }
1652
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001653 sk_locked = 0;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001654 unix_state_lock(other);
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001655restart_locked:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 err = -EPERM;
1657 if (!unix_may_send(sk, other))
1658 goto out_unlock;
1659
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001660 if (unlikely(sock_flag(other, SOCK_DEAD))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 /*
 1662		 * Check with 1003.1g - what error should a
 1663		 * datagram send to a dead peer return?
1664 */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001665 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 sock_put(other);
1667
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001668 if (!sk_locked)
1669 unix_state_lock(sk);
1670
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 if (unix_peer(sk) == other) {
Jianjun Konge27dfce2008-11-01 21:38:31 -07001673 unix_peer(sk) = NULL;
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001674 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1675
David S. Miller1c92b4e2007-05-31 13:24:26 -07001676 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
1678 unix_dgram_disconnected(sk, other);
1679 sock_put(other);
1680 err = -ECONNREFUSED;
1681 } else {
David S. Miller1c92b4e2007-05-31 13:24:26 -07001682 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 }
1684
1685 other = NULL;
1686 if (err)
1687 goto out_free;
1688 goto restart;
1689 }
1690
1691 err = -EPIPE;
1692 if (other->sk_shutdown & RCV_SHUTDOWN)
1693 goto out_unlock;
1694
1695 if (sk->sk_type != SOCK_SEQPACKET) {
1696 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1697 if (err)
1698 goto out_unlock;
1699 }
1700
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001701 if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1702 if (timeo) {
1703 timeo = unix_wait_for_peer(other, timeo);
1704
1705 err = sock_intr_errno(timeo);
1706 if (signal_pending(current))
1707 goto out_free;
1708
1709 goto restart;
1710 }
1711
1712 if (!sk_locked) {
1713 unix_state_unlock(other);
1714 unix_state_double_lock(sk, other);
1715 }
1716
1717 if (unix_peer(sk) != other ||
1718 unix_dgram_peer_wake_me(sk, other)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 err = -EAGAIN;
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001720 sk_locked = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 goto out_unlock;
1722 }
1723
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001724 if (!sk_locked) {
1725 sk_locked = 1;
1726 goto restart_locked;
1727 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 }
1729
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001730 if (unlikely(sk_locked))
1731 unix_state_unlock(sk);
1732
Alban Crequy3f661162010-10-04 08:48:28 +00001733 if (sock_flag(other, SOCK_RCVTSTAMP))
1734 __net_timestamp(skb);
Eric Dumazet16e57262011-09-19 05:52:27 +00001735 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 skb_queue_tail(&other->sk_receive_queue, skb);
Eric Dumazet25888e32010-11-25 04:11:39 +00001737 if (max_level > unix_sk(other)->recursion_level)
1738 unix_sk(other)->recursion_level = max_level;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001739 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001740 other->sk_data_ready(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001742 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 return len;
1744
1745out_unlock:
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001746 if (sk_locked)
1747 unix_state_unlock(sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001748 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749out_free:
1750 kfree_skb(skb);
1751out:
1752 if (other)
1753 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001754 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 return err;
1756}
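/*
 * Illustration (userspace sketch, not part of this file): a sendto(2) on an
 * AF_UNIX datagram socket ends up in unix_dgram_sendmsg() above.  The path
 * name is an arbitrary example and error handling is omitted:
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_un to = { .sun_family = AF_UNIX };
 *		int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *		strncpy(to.sun_path, "/tmp/dgram.sock", sizeof(to.sun_path) - 1);
 *		// Without MSG_DONTWAIT this may block in unix_wait_for_peer()
 *		// when the receiver's queue is full, as handled above.
 *		sendto(fd, "hi", 2, 0, (struct sockaddr *)&to, sizeof(to));
 *		return 0;
 *	}
 */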
1757
Eric Dumazete370a722013-08-08 14:37:32 -07001758/* We use paged skbs for stream sockets, and limit occupancy to 32768
 1759 * bytes, with a minimum of a full page.
1760 */
1761#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
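/*
 * Worked example (assuming 4 KiB pages): get_order(32768) is 3, so
 * UNIX_SKB_FRAGS_SZ is 4096 << 3 == 32768 bytes of page fragments per skb;
 * the linear part is capped separately at SKB_MAX_HEAD(0) in
 * unix_stream_sendmsg() below.
 */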
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001762
Ying Xue1b784142015-03-02 15:37:48 +08001763static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1764 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 struct sock *sk = sock->sk;
1767 struct sock *other = NULL;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001768 int err, size;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001769 struct sk_buff *skb;
Jianjun Konge27dfce2008-11-01 21:38:31 -07001770 int sent = 0;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001771 struct scm_cookie scm;
Miklos Szeredi8ba69ba2009-09-11 11:31:45 -07001772 bool fds_sent = false;
Eric Dumazet25888e32010-11-25 04:11:39 +00001773 int max_level;
Eric Dumazete370a722013-08-08 14:37:32 -07001774 int data_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775
dann frazier5f23b732008-11-26 15:32:27 -08001776 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001777 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 if (err < 0)
1779 return err;
1780
1781 err = -EOPNOTSUPP;
1782 if (msg->msg_flags&MSG_OOB)
1783 goto out_err;
1784
1785 if (msg->msg_namelen) {
1786 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1787 goto out_err;
1788 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 err = -ENOTCONN;
Benjamin LaHaise830a1e52005-12-13 23:22:32 -08001790 other = unix_peer(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 if (!other)
1792 goto out_err;
1793 }
1794
1795 if (sk->sk_shutdown & SEND_SHUTDOWN)
1796 goto pipe_err;
1797
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001798 while (sent < len) {
Eric Dumazete370a722013-08-08 14:37:32 -07001799 size = len - sent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
1801 /* Keep two messages in the pipe so it schedules better */
Eric Dumazete370a722013-08-08 14:37:32 -07001802 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
Eric Dumazete370a722013-08-08 14:37:32 -07001804 /* allow fallback to order-0 allocations */
1805 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001806
Eric Dumazete370a722013-08-08 14:37:32 -07001807 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001808
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001809 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1810
Eric Dumazete370a722013-08-08 14:37:32 -07001811 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001812 msg->msg_flags & MSG_DONTWAIT, &err,
1813 get_order(UNIX_SKB_FRAGS_SZ));
Eric Dumazete370a722013-08-08 14:37:32 -07001814 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 goto out_err;
1816
David S. Millerf78a5fd2011-09-16 19:34:00 -04001817 /* Only send the fds in the first buffer */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001818 err = unix_scm_to_skb(&scm, skb, !fds_sent);
Eric Dumazet25888e32010-11-25 04:11:39 +00001819 if (err < 0) {
Eric W. Biederman7361c362010-06-13 03:34:33 +00001820 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001821 goto out_err;
Miklos Szeredi62093442008-11-09 15:23:57 +01001822 }
Eric Dumazet25888e32010-11-25 04:11:39 +00001823 max_level = err + 1;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001824 fds_sent = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825
Eric Dumazete370a722013-08-08 14:37:32 -07001826 skb_put(skb, size - data_len);
1827 skb->data_len = data_len;
1828 skb->len = size;
Al Viroc0371da2014-11-24 10:42:55 -05001829 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001830 if (err) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001832 goto out_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 }
1834
David S. Miller1c92b4e2007-05-31 13:24:26 -07001835 unix_state_lock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836
1837 if (sock_flag(other, SOCK_DEAD) ||
1838 (other->sk_shutdown & RCV_SHUTDOWN))
1839 goto pipe_err_free;
1840
Eric Dumazet16e57262011-09-19 05:52:27 +00001841 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 skb_queue_tail(&other->sk_receive_queue, skb);
Eric Dumazet25888e32010-11-25 04:11:39 +00001843 if (max_level > unix_sk(other)->recursion_level)
1844 unix_sk(other)->recursion_level = max_level;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001845 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001846 other->sk_data_ready(other);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001847 sent += size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001850 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
1852 return sent;
1853
1854pipe_err_free:
David S. Miller1c92b4e2007-05-31 13:24:26 -07001855 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 kfree_skb(skb);
1857pipe_err:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001858 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1859 send_sig(SIGPIPE, current, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 err = -EPIPE;
1861out_err:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001862 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 return sent ? : err;
1864}
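/*
 * Illustration (userspace sketch, not part of this file): the pipe_err path
 * above raises SIGPIPE unless the sender passes MSG_NOSIGNAL.  "fd", "buf"
 * and "len" are assumed, handle_disconnect() is a hypothetical callback, and
 * <errno.h>/<sys/socket.h> are assumed to be included:
 *
 *	ssize_t n = send(fd, buf, len, MSG_NOSIGNAL);
 *
 *	if (n < 0 && errno == EPIPE)
 *		handle_disconnect();	// peer gone; no signal was delivered
 */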
1865
Ying Xue1b784142015-03-02 15:37:48 +08001866static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1867 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868{
1869 int err;
1870 struct sock *sk = sock->sk;
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001871
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 err = sock_error(sk);
1873 if (err)
1874 return err;
1875
1876 if (sk->sk_state != TCP_ESTABLISHED)
1877 return -ENOTCONN;
1878
1879 if (msg->msg_namelen)
1880 msg->msg_namelen = 0;
1881
Ying Xue1b784142015-03-02 15:37:48 +08001882 return unix_dgram_sendmsg(sock, msg, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883}
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001884
Ying Xue1b784142015-03-02 15:37:48 +08001885static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
1886 size_t size, int flags)
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001887{
1888 struct sock *sk = sock->sk;
1889
1890 if (sk->sk_state != TCP_ESTABLISHED)
1891 return -ENOTCONN;
1892
Ying Xue1b784142015-03-02 15:37:48 +08001893 return unix_dgram_recvmsg(sock, msg, size, flags);
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001894}
1895
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1897{
1898 struct unix_sock *u = unix_sk(sk);
1899
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 if (u->addr) {
1901 msg->msg_namelen = u->addr->len;
1902 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1903 }
1904}
1905
Ying Xue1b784142015-03-02 15:37:48 +08001906static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1907 size_t size, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001909 struct scm_cookie scm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 struct sock *sk = sock->sk;
1911 struct unix_sock *u = unix_sk(sk);
1912 int noblock = flags & MSG_DONTWAIT;
1913 struct sk_buff *skb;
1914 int err;
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001915 int peeked, skip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
1917 err = -EOPNOTSUPP;
1918 if (flags&MSG_OOB)
1919 goto out;
1920
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00001921 err = mutex_lock_interruptible(&u->readlock);
Eric Dumazetde144392014-03-25 18:42:27 -07001922 if (unlikely(err)) {
 1923		/* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
1924 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1925 */
1926 err = noblock ? -EAGAIN : -ERESTARTSYS;
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00001927 goto out;
1928 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001930 skip = sk_peek_offset(sk, flags);
1931
1932 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
Florian Zumbiehl0a112252007-11-29 23:19:23 +11001933 if (!skb) {
1934 unix_state_lock(sk);
1935 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1936 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1937 (sk->sk_shutdown & RCV_SHUTDOWN))
1938 err = 0;
1939 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 goto out_unlock;
Florian Zumbiehl0a112252007-11-29 23:19:23 +11001941 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Eric Dumazet67426b72010-10-29 20:44:44 +00001943 wake_up_interruptible_sync_poll(&u->peer_wait,
1944 POLLOUT | POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
1946 if (msg->msg_name)
1947 unix_copy_addr(msg, skb->sk);
1948
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001949 if (size > skb->len - skip)
1950 size = skb->len - skip;
1951 else if (size < skb->len - skip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 msg->msg_flags |= MSG_TRUNC;
1953
David S. Miller51f3d022014-11-05 16:46:40 -05001954 err = skb_copy_datagram_msg(skb, skip, msg, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 if (err)
1956 goto out_free;
1957
Alban Crequy3f661162010-10-04 08:48:28 +00001958 if (sock_flag(sk, SOCK_RCVTSTAMP))
1959 __sock_recv_timestamp(msg, sk, skb);
1960
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001961 memset(&scm, 0, sizeof(scm));
1962
1963 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
1964 unix_set_secdata(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001966 if (!(flags & MSG_PEEK)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001968 unix_detach_fds(&scm, skb);
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001969
1970 sk_peek_offset_bwd(sk, skb->len);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001971 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 /* It is questionable: on PEEK we could:
1973 - do not return fds - good, but too simple 8)
1974 - return fds, and do not return them on read (old strategy,
1975 apparently wrong)
1976 - clone fds (I chose it for now, it is the most universal
1977 solution)
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001978
1979 POSIX 1003.1g does not actually define this clearly
1980 at all. POSIX 1003.1g doesn't define a lot of things
1981 clearly however!
1982
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 */
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001984
1985 sk_peek_offset_fwd(sk, size);
1986
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001988 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 }
Eric Dumazet9f6f9af2012-02-21 23:24:55 +00001990 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001992 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
1994out_free:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001995 skb_free_datagram(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996out_unlock:
Ingo Molnar57b47a52006-03-20 22:35:41 -08001997 mutex_unlock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998out:
1999 return err;
2000}
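/*
 * Illustration (userspace sketch, not part of this file): the sk_peek_offset
 * handling above is driven by the SO_PEEK_OFF socket option.  "fd" is an
 * assumed AF_UNIX socket:
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	// Each recv(fd, buf, len, MSG_PEEK) now advances the peek offset
 *	// instead of re-reading from the head of the queue; a normal read
 *	// rewinds it again (sk_peek_offset_bwd() above).
 */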
2001
2002/*
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002003 *	Sleep until more data has arrived. But check for races.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 */
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002005static long unix_stream_data_wait(struct sock *sk, long timeo,
2006 struct sk_buff *last)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007{
2008 DEFINE_WAIT(wait);
2009
David S. Miller1c92b4e2007-05-31 13:24:26 -07002010 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011
2012 for (;;) {
Eric Dumazetaa395142010-04-20 13:03:51 +00002013 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002015 if (skb_peek_tail(&sk->sk_receive_queue) != last ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 sk->sk_err ||
2017 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2018 signal_pending(current) ||
2019 !timeo)
2020 break;
2021
2022 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002023 unix_state_unlock(sk);
Colin Cross2b15af62013-05-06 23:50:21 +00002024 timeo = freezable_schedule_timeout(timeo);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002025 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07002026
2027 if (sock_flag(sk, SOCK_DEAD))
2028 break;
2029
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2031 }
2032
Eric Dumazetaa395142010-04-20 13:03:51 +00002033 finish_wait(sk_sleep(sk), &wait);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002034 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 return timeo;
2036}
2037
Eric Dumazete370a722013-08-08 14:37:32 -07002038static unsigned int unix_skb_len(const struct sk_buff *skb)
2039{
2040 return skb->len - UNIXCB(skb).consumed;
2041}
2042
Ying Xue1b784142015-03-02 15:37:48 +08002043static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2044 size_t size, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002046 struct scm_cookie scm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 struct sock *sk = sock->sk;
2048 struct unix_sock *u = unix_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002049 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 int copied = 0;
Eric Dumazetde144392014-03-25 18:42:27 -07002051 int noblock = flags & MSG_DONTWAIT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 int check_creds = 0;
2053 int target;
2054 int err = 0;
2055 long timeo;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002056 int skip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058 err = -EINVAL;
2059 if (sk->sk_state != TCP_ESTABLISHED)
2060 goto out;
2061
2062 err = -EOPNOTSUPP;
2063 if (flags&MSG_OOB)
2064 goto out;
2065
2066 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
Eric Dumazetde144392014-03-25 18:42:27 -07002067 timeo = sock_rcvtimeo(sk, noblock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 /* Lock the socket to prevent queue disordering
 2070	 * while we sleep copying data to the msg
2071 */
2072
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002073 memset(&scm, 0, sizeof(scm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
Rainer Weikusatcc01a0a2015-12-16 20:09:25 +00002075 mutex_lock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
Andrey Vagine09e8892015-10-02 00:05:36 +03002077 if (flags & MSG_PEEK)
2078 skip = sk_peek_offset(sk, flags);
2079 else
2080 skip = 0;
2081
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002082 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 int chunk;
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002084 struct sk_buff *skb, *last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002086 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07002087 if (sock_flag(sk, SOCK_DEAD)) {
2088 err = -ECONNRESET;
2089 goto unlock;
2090 }
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002091 last = skb = skb_peek(&sk->sk_receive_queue);
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002092again:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002093 if (skb == NULL) {
Eric Dumazet25888e32010-11-25 04:11:39 +00002094 unix_sk(sk)->recursion_level = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 if (copied >= target)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002096 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
2098 /*
2099 * POSIX 1003.1g mandates this order.
2100 */
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002101
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002102 err = sock_error(sk);
2103 if (err)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002104 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 if (sk->sk_shutdown & RCV_SHUTDOWN)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002106 goto unlock;
2107
2108 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 err = -EAGAIN;
2110 if (!timeo)
2111 break;
Ingo Molnar57b47a52006-03-20 22:35:41 -08002112 mutex_unlock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002114 timeo = unix_stream_data_wait(sk, timeo, last);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
Rainer Weikusatcc01a0a2015-12-16 20:09:25 +00002116 if (signal_pending(current)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 err = sock_intr_errno(timeo);
2118 goto out;
2119 }
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00002120
Rainer Weikusatcc01a0a2015-12-16 20:09:25 +00002121 mutex_lock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 continue;
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002123 unlock:
2124 unix_state_unlock(sk);
2125 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 }
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002127
Eric Dumazete370a722013-08-08 14:37:32 -07002128 while (skip >= unix_skb_len(skb)) {
2129 skip -= unix_skb_len(skb);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002130 last = skb;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002131 skb = skb_peek_next(skb, &sk->sk_receive_queue);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002132 if (!skb)
2133 goto again;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002134 }
2135
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002136 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
2138 if (check_creds) {
2139 /* Never glue messages from different writers */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002140 if ((UNIXCB(skb).pid != scm.pid) ||
2141 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
2142 !gid_eq(UNIXCB(skb).gid, scm.creds.gid))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 break;
Eric W. Biederman0e82e7f6d2013-04-03 16:14:47 +00002144 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 /* Copy credentials */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002146 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 check_creds = 1;
2148 }
2149
2150 /* Copy address just once */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002151 if (sunaddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 unix_copy_addr(msg, skb->sk);
2153 sunaddr = NULL;
2154 }
2155
Eric Dumazete370a722013-08-08 14:37:32 -07002156 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
David S. Miller51f3d022014-11-05 16:46:40 -05002157 if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2158 msg, chunk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 if (copied == 0)
2160 copied = -EFAULT;
2161 break;
2162 }
2163 copied += chunk;
2164 size -= chunk;
2165
2166 /* Mark read part of skb as used */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002167 if (!(flags & MSG_PEEK)) {
Eric Dumazete370a722013-08-08 14:37:32 -07002168 UNIXCB(skb).consumed += chunk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002170 sk_peek_offset_bwd(sk, chunk);
2171
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002173 unix_detach_fds(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
Eric Dumazete370a722013-08-08 14:37:32 -07002175 if (unix_skb_len(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
Eric Dumazet6f01fd62012-01-28 16:11:03 +00002178 skb_unlink(skb, &sk->sk_receive_queue);
Neil Horman70d4bf62010-07-20 06:45:56 +00002179 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002181 if (scm.fp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002183 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 /* It is questionable, see note in unix_dgram_recvmsg.
2185 */
2186 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002187 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Andrey Vagine09e8892015-10-02 00:05:36 +03002189 sk_peek_offset_fwd(sk, chunk);
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002190
Aaron Conole9bf31c52015-09-26 18:50:43 -04002191 if (UNIXCB(skb).fp)
2192 break;
2193
Andrey Vagine09e8892015-10-02 00:05:36 +03002194 skip = 0;
Aaron Conole9bf31c52015-09-26 18:50:43 -04002195 last = skb;
2196 unix_state_lock(sk);
2197 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2198 if (skb)
2199 goto again;
2200 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 break;
2202 }
2203 } while (size);
2204
Ingo Molnar57b47a52006-03-20 22:35:41 -08002205 mutex_unlock(&u->readlock);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002206 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207out:
2208 return copied ? : err;
2209}
2210
2211static int unix_shutdown(struct socket *sock, int mode)
2212{
2213 struct sock *sk = sock->sk;
2214 struct sock *other;
2215
Xi Wangfc61b922012-08-26 16:47:13 +00002216 if (mode < SHUT_RD || mode > SHUT_RDWR)
2217 return -EINVAL;
2218 /* This maps:
2219 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2220 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2221 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2222 */
2223 ++mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Alban Crequy7180a032011-01-19 04:56:36 +00002225 unix_state_lock(sk);
2226 sk->sk_shutdown |= mode;
2227 other = unix_peer(sk);
2228 if (other)
2229 sock_hold(other);
2230 unix_state_unlock(sk);
2231 sk->sk_state_change(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
Alban Crequy7180a032011-01-19 04:56:36 +00002233 if (other &&
2234 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
Alban Crequy7180a032011-01-19 04:56:36 +00002236 int peer_mode = 0;
2237
2238 if (mode&RCV_SHUTDOWN)
2239 peer_mode |= SEND_SHUTDOWN;
2240 if (mode&SEND_SHUTDOWN)
2241 peer_mode |= RCV_SHUTDOWN;
2242 unix_state_lock(other);
2243 other->sk_shutdown |= peer_mode;
2244 unix_state_unlock(other);
2245 other->sk_state_change(other);
2246 if (peer_mode == SHUTDOWN_MASK)
2247 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2248 else if (peer_mode & RCV_SHUTDOWN)
2249 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 }
Alban Crequy7180a032011-01-19 04:56:36 +00002251 if (other)
2252 sock_put(other);
2253
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 return 0;
2255}
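/*
 * Illustration (userspace sketch, not part of this file): the SHUT_* mapping
 * in unix_shutdown() above as seen from userspace.  "fd" is an assumed
 * connected stream socket, consume() a hypothetical helper:
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	shutdown(fd, SHUT_WR);		// we stop sending; the peer reads EOF
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);	// but we can still drain the peer's data
 */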
2256
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002257long unix_inq_len(struct sock *sk)
2258{
2259 struct sk_buff *skb;
2260 long amount = 0;
2261
2262 if (sk->sk_state == TCP_LISTEN)
2263 return -EINVAL;
2264
2265 spin_lock(&sk->sk_receive_queue.lock);
2266 if (sk->sk_type == SOCK_STREAM ||
2267 sk->sk_type == SOCK_SEQPACKET) {
2268 skb_queue_walk(&sk->sk_receive_queue, skb)
Eric Dumazete370a722013-08-08 14:37:32 -07002269 amount += unix_skb_len(skb);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002270 } else {
2271 skb = skb_peek(&sk->sk_receive_queue);
2272 if (skb)
2273 amount = skb->len;
2274 }
2275 spin_unlock(&sk->sk_receive_queue.lock);
2276
2277 return amount;
2278}
2279EXPORT_SYMBOL_GPL(unix_inq_len);
2280
2281long unix_outq_len(struct sock *sk)
2282{
2283 return sk_wmem_alloc_get(sk);
2284}
2285EXPORT_SYMBOL_GPL(unix_outq_len);
2286
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2288{
2289 struct sock *sk = sock->sk;
Jianjun Konge27dfce2008-11-01 21:38:31 -07002290 long amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 int err;
2292
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002293 switch (cmd) {
2294 case SIOCOUTQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002295 amount = unix_outq_len(sk);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002296 err = put_user(amount, (int __user *)arg);
2297 break;
2298 case SIOCINQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002299 amount = unix_inq_len(sk);
2300 if (amount < 0)
2301 err = amount;
2302 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 err = put_user(amount, (int __user *)arg);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002304 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002305 default:
2306 err = -ENOIOCTLCMD;
2307 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 }
2309 return err;
2310}
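/*
 * Illustration (userspace sketch, not part of this file): querying the queue
 * lengths computed by unix_inq_len()/unix_outq_len() above.  "fd" is an
 * assumed AF_UNIX socket; SIOCINQ/SIOCOUTQ come from <linux/sockios.h>:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *	#include <stdio.h>
 *
 *	int queued;
 *
 *	if (ioctl(fd, SIOCINQ, &queued) == 0)
 *		printf("%d bytes waiting to be read\n", queued);
 *	if (ioctl(fd, SIOCOUTQ, &queued) == 0)
 *		printf("%d bytes sent but not yet read by the peer\n", queued);
 */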
2311
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002312static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313{
2314 struct sock *sk = sock->sk;
2315 unsigned int mask;
2316
Eric Dumazetaa395142010-04-20 13:03:51 +00002317 sock_poll_wait(file, sk_sleep(sk), wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 mask = 0;
2319
2320 /* exceptional events? */
2321 if (sk->sk_err)
2322 mask |= POLLERR;
2323 if (sk->sk_shutdown == SHUTDOWN_MASK)
2324 mask |= POLLHUP;
Davide Libenzif348d702006-03-25 03:07:39 -08002325 if (sk->sk_shutdown & RCV_SHUTDOWN)
Eric Dumazetdb409802010-09-06 11:13:50 +00002326 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327
2328 /* readable? */
Eric Dumazetdb409802010-09-06 11:13:50 +00002329 if (!skb_queue_empty(&sk->sk_receive_queue))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 mask |= POLLIN | POLLRDNORM;
2331
2332 /* Connection-based need to check for termination and startup */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002333 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2334 sk->sk_state == TCP_CLOSE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 mask |= POLLHUP;
2336
2337 /*
 2338	 * We also report the socket as writable when the other side has shut down the
2339 * connection. This prevents stuck sockets.
2340 */
2341 if (unix_writable(sk))
2342 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2343
2344 return mask;
2345}
2346
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002347static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2348 poll_table *wait)
Rainer Weikusat3c734192008-06-17 22:28:05 -07002349{
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002350 struct sock *sk = sock->sk, *other;
2351 unsigned int mask, writable;
Rainer Weikusat3c734192008-06-17 22:28:05 -07002352
Eric Dumazetaa395142010-04-20 13:03:51 +00002353 sock_poll_wait(file, sk_sleep(sk), wait);
Rainer Weikusat3c734192008-06-17 22:28:05 -07002354 mask = 0;
2355
2356 /* exceptional events? */
2357 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00002358 mask |= POLLERR |
Jacob Keller8facd5f2013-04-02 13:55:40 -07002359 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00002360
Rainer Weikusat3c734192008-06-17 22:28:05 -07002361 if (sk->sk_shutdown & RCV_SHUTDOWN)
Eric Dumazet5456f092010-10-31 05:36:23 +00002362 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
Rainer Weikusat3c734192008-06-17 22:28:05 -07002363 if (sk->sk_shutdown == SHUTDOWN_MASK)
2364 mask |= POLLHUP;
2365
2366 /* readable? */
Eric Dumazet5456f092010-10-31 05:36:23 +00002367 if (!skb_queue_empty(&sk->sk_receive_queue))
Rainer Weikusat3c734192008-06-17 22:28:05 -07002368 mask |= POLLIN | POLLRDNORM;
2369
2370 /* Connection-based need to check for termination and startup */
2371 if (sk->sk_type == SOCK_SEQPACKET) {
2372 if (sk->sk_state == TCP_CLOSE)
2373 mask |= POLLHUP;
2374 /* connection hasn't started yet? */
2375 if (sk->sk_state == TCP_SYN_SENT)
2376 return mask;
2377 }
2378
Eric Dumazet973a34a2010-10-31 05:38:25 +00002379 /* No write status requested, avoid expensive OUT tests. */
Hans Verkuil626cf232012-03-23 15:02:27 -07002380 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
Eric Dumazet973a34a2010-10-31 05:38:25 +00002381 return mask;
2382
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002383 writable = unix_writable(sk);
Rainer Weikusat5c77e262015-11-20 22:07:23 +00002384 if (writable) {
2385 unix_state_lock(sk);
2386
2387 other = unix_peer(sk);
2388 if (other && unix_peer(other) != sk &&
2389 unix_recvq_full(other) &&
2390 unix_dgram_peer_wake_me(sk, other))
2391 writable = 0;
2392
2393 unix_state_unlock(sk);
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002394 }
2395
2396 if (writable)
Rainer Weikusat3c734192008-06-17 22:28:05 -07002397 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2398 else
2399 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2400
Rainer Weikusat3c734192008-06-17 22:28:05 -07002401 return mask;
2402}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403
2404#ifdef CONFIG_PROC_FS
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002405
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002406#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2407
2408#define get_bucket(x) ((x) >> BUCKET_SPACE)
2409#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2410#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
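/*
 * The seq_file position packs (bucket, offset) into one loff_t: the upper
 * bits select the hash chain and the lower BUCKET_SPACE bits hold a 1-based
 * offset within it.  For instance, assuming UNIX_HASH_BITS == 8 on a 64-bit
 * kernel, BUCKET_SPACE works out to 64 - 9 - 1 = 54.
 */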
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002411
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002412static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413{
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002414 unsigned long offset = get_offset(*pos);
2415 unsigned long bucket = get_bucket(*pos);
2416 struct sock *sk;
2417 unsigned long count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002419 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2420 if (sock_net(sk) != seq_file_net(seq))
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002421 continue;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002422 if (++count == offset)
2423 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 }
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002425
2426 return sk;
2427}
2428
2429static struct sock *unix_next_socket(struct seq_file *seq,
2430 struct sock *sk,
2431 loff_t *pos)
2432{
2433 unsigned long bucket;
2434
2435 while (sk > (struct sock *)SEQ_START_TOKEN) {
2436 sk = sk_next(sk);
2437 if (!sk)
2438 goto next_bucket;
2439 if (sock_net(sk) == seq_file_net(seq))
2440 return sk;
2441 }
2442
2443 do {
2444 sk = unix_from_bucket(seq, pos);
2445 if (sk)
2446 return sk;
2447
2448next_bucket:
2449 bucket = get_bucket(*pos) + 1;
2450 *pos = set_bucket_offset(bucket, 1);
2451 } while (bucket < ARRAY_SIZE(unix_socket_table));
2452
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 return NULL;
2454}
2455
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002457 __acquires(unix_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458{
David S. Millerfbe9cc42005-12-13 23:26:29 -08002459 spin_lock(&unix_table_lock);
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002460
2461 if (!*pos)
2462 return SEQ_START_TOKEN;
2463
2464 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2465 return NULL;
2466
2467 return unix_next_socket(seq, NULL, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468}
2469
2470static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2471{
2472 ++*pos;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002473 return unix_next_socket(seq, v, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474}
2475
2476static void unix_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002477 __releases(unix_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478{
David S. Millerfbe9cc42005-12-13 23:26:29 -08002479 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480}
2481
2482static int unix_seq_show(struct seq_file *seq, void *v)
2483{
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002484
Joe Perchesb9f31242008-04-12 19:04:38 -07002485 if (v == SEQ_START_TOKEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2487 "Inode Path\n");
2488 else {
2489 struct sock *s = v;
2490 struct unix_sock *u = unix_sk(s);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002491 unix_state_lock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492
Dan Rosenberg71338aa2011-05-23 12:17:35 +00002493 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 s,
2495 atomic_read(&s->sk_refcnt),
2496 0,
2497 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2498 s->sk_type,
2499 s->sk_socket ?
2500 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2501 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2502 sock_i_ino(s));
2503
2504 if (u->addr) {
2505 int i, len;
2506 seq_putc(seq, ' ');
2507
2508 i = 0;
2509 len = u->addr->len - sizeof(short);
2510 if (!UNIX_ABSTRACT(s))
2511 len--;
2512 else {
2513 seq_putc(seq, '@');
2514 i++;
2515 }
2516 for ( ; i < len; i++)
2517 seq_putc(seq, u->addr->name->sun_path[i]);
2518 }
David S. Miller1c92b4e2007-05-31 13:24:26 -07002519 unix_state_unlock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 seq_putc(seq, '\n');
2521 }
2522
2523 return 0;
2524}
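/*
 * Illustration: with the format strings above, a listening stream socket
 * bound to a filesystem path shows up in /proc/net/unix roughly like the
 * made-up line below.  The flags column is __SO_ACCEPTCON (0x00010000) and
 * the leading pointer is typically zeroed for unprivileged readers (%pK,
 * depending on kptr_restrict):
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	0000000000000000: 00000002 00000000 00010000 0001 01 16163 /run/example.sock
 */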
2525
Philippe De Muyter56b3d972007-07-10 23:07:31 -07002526static const struct seq_operations unix_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 .start = unix_seq_start,
2528 .next = unix_seq_next,
2529 .stop = unix_seq_stop,
2530 .show = unix_seq_show,
2531};
2532
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533static int unix_seq_open(struct inode *inode, struct file *file)
2534{
Denis V. Luneve372c412007-11-19 22:31:54 -08002535 return seq_open_net(inode, file, &unix_seq_ops,
Eric Dumazet8b51b062012-06-08 22:10:20 +00002536 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537}
2538
Arjan van de Venda7071d2007-02-12 00:55:36 -08002539static const struct file_operations unix_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 .owner = THIS_MODULE,
2541 .open = unix_seq_open,
2542 .read = seq_read,
2543 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002544 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545};
2546
2547#endif
2548
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00002549static const struct net_proto_family unix_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 .family = PF_UNIX,
2551 .create = unix_create,
2552 .owner = THIS_MODULE,
2553};
2554
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002555
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002556static int __net_init unix_net_init(struct net *net)
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002557{
2558 int error = -ENOMEM;
2559
Denis V. Luneva0a53c82007-12-11 04:19:17 -08002560 net->unx.sysctl_max_dgram_qlen = 10;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002561 if (unix_sysctl_register(net))
2562 goto out;
Pavel Emelyanovd392e492007-12-01 23:44:15 +11002563
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002564#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00002565 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002566 unix_sysctl_unregister(net);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002567 goto out;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002568 }
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002569#endif
2570 error = 0;
2571out:
Jianjun Kong48dcc33e2008-11-01 21:37:27 -07002572 return error;
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002573}
2574
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002575static void __net_exit unix_net_exit(struct net *net)
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002576{
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002577 unix_sysctl_unregister(net);
Gao fengece31ff2013-02-18 01:34:56 +00002578 remove_proc_entry("unix", net->proc_net);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002579}
2580
2581static struct pernet_operations unix_net_ops = {
2582 .init = unix_net_init,
2583 .exit = unix_net_exit,
2584};
2585
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586static int __init af_unix_init(void)
2587{
2588 int rc = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589
YOSHIFUJI Hideaki / 吉藤英明b4fff5f2013-01-09 07:20:07 +00002590 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591
2592 rc = proto_register(&unix_proto, 1);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002593 if (rc != 0) {
wangweidong5cc208b2013-12-06 18:03:36 +08002594 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 goto out;
2596 }
2597
2598 sock_register(&unix_family_ops);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002599 register_pernet_subsys(&unix_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600out:
2601 return rc;
2602}
2603
2604static void __exit af_unix_exit(void)
2605{
2606 sock_unregister(PF_UNIX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 proto_unregister(&unix_proto);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002608 unregister_pernet_subsys(&unix_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609}
2610
David Woodhouse3d366962008-04-24 00:59:25 -07002611/* Earlier than device_initcall() so that other drivers invoking
2612 request_module() don't end up in a loop when modprobe tries
2613 to use a UNIX socket. But later than subsys_initcall() because
2614 we depend on stuff initialised there */
2615fs_initcall(af_unix_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616module_exit(af_unix_exit);
2617
2618MODULE_LICENSE("GPL");
2619MODULE_ALIAS_NETPROTO(PF_UNIX);