blob: d6440428680565823e28846681a4108ca49bfa1f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
Alan Cox113aa832008-10-13 19:01:08 -07004 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
 *		Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *						by above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * is been reached. This won't break
37 * old apps and it will avoid huge amount
38 * of socks hashed (this for unix_gc()
39 * performances reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
82
wangweidong5cc208b2013-12-06 18:03:36 +080083#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
84
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070086#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070087#include <linux/signal.h>
88#include <linux/sched.h>
89#include <linux/errno.h>
90#include <linux/string.h>
91#include <linux/stat.h>
92#include <linux/dcache.h>
93#include <linux/namei.h>
94#include <linux/socket.h>
95#include <linux/un.h>
96#include <linux/fcntl.h>
97#include <linux/termios.h>
98#include <linux/sockios.h>
99#include <linux/net.h>
100#include <linux/in.h>
101#include <linux/fs.h>
102#include <linux/slab.h>
103#include <asm/uaccess.h>
104#include <linux/skbuff.h>
105#include <linux/netdevice.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +0200106#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107#include <net/sock.h>
Arnaldo Carvalho de Meloc752f072005-08-09 20:08:28 -0700108#include <net/tcp_states.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109#include <net/af_unix.h>
110#include <linux/proc_fs.h>
111#include <linux/seq_file.h>
112#include <net/scm.h>
113#include <linux/init.h>
114#include <linux/poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#include <linux/rtnetlink.h>
116#include <linux/mount.h>
117#include <net/checksum.h>
118#include <linux/security.h>
Colin Cross2b15af62013-05-06 23:50:21 +0000119#include <linux/freezer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120
Eric Dumazet7123aaa2012-06-08 05:03:21 +0000121struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
Pavel Emelyanovfa7ff562011-12-15 02:44:03 +0000122EXPORT_SYMBOL_GPL(unix_socket_table);
123DEFINE_SPINLOCK(unix_table_lock);
124EXPORT_SYMBOL_GPL(unix_table_lock);
Eric Dumazet518de9b2010-10-26 14:22:44 -0700125static atomic_long_t unix_nr_socks;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127
Eric Dumazet7123aaa2012-06-08 05:03:21 +0000128static struct hlist_head *unix_sockets_unbound(void *addr)
129{
130 unsigned long hash = (unsigned long)addr;
131
132 hash ^= hash >> 16;
133 hash ^= hash >> 8;
134 hash %= UNIX_HASH_SIZE;
135 return &unix_socket_table[UNIX_HASH_SIZE + hash];
136}
137
138#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
Catherine Zhang877ce7c2006-06-29 12:27:47 -0700140#ifdef CONFIG_SECURITY_NETWORK
Catherine Zhangdc49c1f2006-08-02 14:12:06 -0700141static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
Catherine Zhang877ce7c2006-06-29 12:27:47 -0700142{
Catherine Zhangdc49c1f2006-08-02 14:12:06 -0700143 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
Catherine Zhang877ce7c2006-06-29 12:27:47 -0700144}
145
146static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
147{
Catherine Zhangdc49c1f2006-08-02 14:12:06 -0700148 scm->secid = *UNIXSID(skb);
Catherine Zhang877ce7c2006-06-29 12:27:47 -0700149}
150#else
Catherine Zhangdc49c1f2006-08-02 14:12:06 -0700151static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
Catherine Zhang877ce7c2006-06-29 12:27:47 -0700152{ }
153
154static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
155{ }
156#endif /* CONFIG_SECURITY_NETWORK */
157
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158/*
159 * SMP locking strategy:
David S. Millerfbe9cc42005-12-13 23:26:29 -0800160 * hash table is protected with spinlock unix_table_lock
Stephen Hemminger663717f2010-02-18 14:12:06 -0800161 * each socket state is protected by separate spin lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 */
163
Eric Dumazet95c96172012-04-15 05:58:06 +0000164static inline unsigned int unix_hash_fold(__wsum n)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165{
Anton Blanchard0a134042014-03-05 14:29:58 +1100166 unsigned int hash = (__force unsigned int)csum_fold(n);
Eric Dumazet95c96172012-04-15 05:58:06 +0000167
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 hash ^= hash>>8;
169 return hash&(UNIX_HASH_SIZE-1);
170}
171
172#define unix_peer(sk) (unix_sk(sk)->peer)
173
174static inline int unix_our_peer(struct sock *sk, struct sock *osk)
175{
176 return unix_peer(osk) == sk;
177}
178
179static inline int unix_may_send(struct sock *sk, struct sock *osk)
180{
Eric Dumazet6eba6a32008-11-16 22:58:44 -0800181 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182}
183
Rainer Weikusat3c734192008-06-17 22:28:05 -0700184static inline int unix_recvq_full(struct sock const *sk)
185{
186 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
187}
188
Pavel Emelyanovfa7ff562011-12-15 02:44:03 +0000189struct sock *unix_peer_get(struct sock *s)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190{
191 struct sock *peer;
192
David S. Miller1c92b4e2007-05-31 13:24:26 -0700193 unix_state_lock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194 peer = unix_peer(s);
195 if (peer)
196 sock_hold(peer);
David S. Miller1c92b4e2007-05-31 13:24:26 -0700197 unix_state_unlock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 return peer;
199}
Pavel Emelyanovfa7ff562011-12-15 02:44:03 +0000200EXPORT_SYMBOL_GPL(unix_peer_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
202static inline void unix_release_addr(struct unix_address *addr)
203{
204 if (atomic_dec_and_test(&addr->refcnt))
205 kfree(addr);
206}
207
208/*
209 * Check unix socket name:
210 * - should be not zero length.
211 * - if started by not zero, should be NULL terminated (FS object)
212 * - if started by zero, it is abstract name.
213 */
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +0900214
Eric Dumazet95c96172012-04-15 05:58:06 +0000215static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216{
217 if (len <= sizeof(short) || len > sizeof(*sunaddr))
218 return -EINVAL;
219 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
220 return -EINVAL;
221 if (sunaddr->sun_path[0]) {
222 /*
223 * This may look like an off by one error but it is a bit more
224 * subtle. 108 is the longest valid AF_UNIX path for a binding.
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300225 * sun_path[108] doesn't as such exist. However in kernel space
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226 * we are guaranteed that it is a valid memory location in our
227 * kernel address buffer.
228 */
Jianjun Konge27dfce2008-11-01 21:38:31 -0700229 ((char *)sunaddr)[len] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 len = strlen(sunaddr->sun_path)+1+sizeof(short);
231 return len;
232 }
233
Joe Perches07f07572008-11-19 15:44:53 -0800234 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 return len;
236}
237
238static void __unix_remove_socket(struct sock *sk)
239{
240 sk_del_node_init(sk);
241}
242
243static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
244{
Ilpo Järvinen547b7922008-07-25 21:43:18 -0700245 WARN_ON(!sk_unhashed(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246 sk_add_node(sk, list);
247}
248
249static inline void unix_remove_socket(struct sock *sk)
250{
David S. Millerfbe9cc42005-12-13 23:26:29 -0800251 spin_lock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 __unix_remove_socket(sk);
David S. Millerfbe9cc42005-12-13 23:26:29 -0800253 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254}
255
256static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
257{
David S. Millerfbe9cc42005-12-13 23:26:29 -0800258 spin_lock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259 __unix_insert_socket(list, sk);
David S. Millerfbe9cc42005-12-13 23:26:29 -0800260 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261}
262
Denis V. Lunev097e66c2007-11-19 22:29:30 -0800263static struct sock *__unix_find_socket_byname(struct net *net,
264 struct sockaddr_un *sunname,
Eric Dumazet95c96172012-04-15 05:58:06 +0000265 int len, int type, unsigned int hash)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 struct sock *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
Sasha Levinb67bfe02013-02-27 17:06:00 -0800269 sk_for_each(s, &unix_socket_table[hash ^ type]) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270 struct unix_sock *u = unix_sk(s);
271
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +0900272 if (!net_eq(sock_net(s), net))
Denis V. Lunev097e66c2007-11-19 22:29:30 -0800273 continue;
274
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 if (u->addr->len == len &&
276 !memcmp(u->addr->name, sunname, len))
277 goto found;
278 }
279 s = NULL;
280found:
281 return s;
282}
283
Denis V. Lunev097e66c2007-11-19 22:29:30 -0800284static inline struct sock *unix_find_socket_byname(struct net *net,
285 struct sockaddr_un *sunname,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286 int len, int type,
Eric Dumazet95c96172012-04-15 05:58:06 +0000287 unsigned int hash)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288{
289 struct sock *s;
290
David S. Millerfbe9cc42005-12-13 23:26:29 -0800291 spin_lock(&unix_table_lock);
Denis V. Lunev097e66c2007-11-19 22:29:30 -0800292 s = __unix_find_socket_byname(net, sunname, len, type, hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 if (s)
294 sock_hold(s);
David S. Millerfbe9cc42005-12-13 23:26:29 -0800295 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 return s;
297}
298
Eric W. Biederman6616f782010-06-13 03:35:48 +0000299static struct sock *unix_find_socket_byinode(struct inode *i)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300{
301 struct sock *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
David S. Millerfbe9cc42005-12-13 23:26:29 -0800303 spin_lock(&unix_table_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800304 sk_for_each(s,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
Al Viro40ffe672012-03-14 21:54:32 -0400306 struct dentry *dentry = unix_sk(s)->path.dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
David Howellsa25b3762015-03-17 22:26:21 +0000308 if (dentry && d_backing_inode(dentry) == i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 sock_hold(s);
310 goto found;
311 }
312 }
313 s = NULL;
314found:
David S. Millerfbe9cc42005-12-13 23:26:29 -0800315 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 return s;
317}
318
Rainer Weikusat5c77e262015-11-20 22:07:23 +0000319/* Support code for asymmetrically connected dgram sockets
320 *
321 * If a datagram socket is connected to a socket not itself connected
322 * to the first socket (eg, /dev/log), clients may only enqueue more
323 * messages if the present receive queue of the server socket is not
324 * "too large". This means there's a second writeability condition
325 * poll and sendmsg need to test. The dgram recv code will do a wake
326 * up on the peer_wait wait queue of a socket upon reception of a
327 * datagram which needs to be propagated to sleeping would-be writers
328 * since these might not have sent anything so far. This can't be
329 * accomplished via poll_wait because the lifetime of the server
330 * socket might be less than that of its clients if these break their
331 * association with it or if the server socket is closed while clients
332 * are still connected to it and there's no way to inform "a polling
333 * implementation" that it should let go of a certain wait queue
334 *
335 * In order to propagate a wake up, a wait_queue_t of the client
336 * socket is enqueued on the peer_wait queue of the server socket
337 * whose wake function does a wake_up on the ordinary client socket
338 * wait queue. This connection is established whenever a write (or
339 * poll for write) hit the flow control condition and broken when the
340 * association to the server socket is dissolved or after a wake up
341 * was relayed.
342 */
343
344static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
345 void *key)
346{
347 struct unix_sock *u;
348 wait_queue_head_t *u_sleep;
349
350 u = container_of(q, struct unix_sock, peer_wake);
351
352 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
353 q);
354 u->peer_wake.private = NULL;
355
356 /* relaying can only happen while the wq still exists */
357 u_sleep = sk_sleep(&u->sk);
358 if (u_sleep)
359 wake_up_interruptible_poll(u_sleep, key);
360
361 return 0;
362}
363
364static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
365{
366 struct unix_sock *u, *u_other;
367 int rc;
368
369 u = unix_sk(sk);
370 u_other = unix_sk(other);
371 rc = 0;
372 spin_lock(&u_other->peer_wait.lock);
373
374 if (!u->peer_wake.private) {
375 u->peer_wake.private = other;
376 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
377
378 rc = 1;
379 }
380
381 spin_unlock(&u_other->peer_wait.lock);
382 return rc;
383}
384
385static void unix_dgram_peer_wake_disconnect(struct sock *sk,
386 struct sock *other)
387{
388 struct unix_sock *u, *u_other;
389
390 u = unix_sk(sk);
391 u_other = unix_sk(other);
392 spin_lock(&u_other->peer_wait.lock);
393
394 if (u->peer_wake.private == other) {
395 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
396 u->peer_wake.private = NULL;
397 }
398
399 spin_unlock(&u_other->peer_wait.lock);
400}
401
402static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
403 struct sock *other)
404{
405 unix_dgram_peer_wake_disconnect(sk, other);
406 wake_up_interruptible_poll(sk_sleep(sk),
407 POLLOUT |
408 POLLWRNORM |
409 POLLWRBAND);
410}
411
412/* preconditions:
413 * - unix_peer(sk) == other
414 * - association is stable
415 */
416static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
417{
418 int connected;
419
420 connected = unix_dgram_peer_wake_connect(sk, other);
421
422 if (unix_recvq_full(other))
423 return 1;
424
425 if (connected)
426 unix_dgram_peer_wake_disconnect(sk, other);
427
428 return 0;
429}
430
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431static inline int unix_writable(struct sock *sk)
432{
433 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
434}
435
436static void unix_write_space(struct sock *sk)
437{
Eric Dumazet43815482010-04-29 11:01:49 +0000438 struct socket_wq *wq;
439
440 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441 if (unix_writable(sk)) {
Eric Dumazet43815482010-04-29 11:01:49 +0000442 wq = rcu_dereference(sk->sk_wq);
443 if (wq_has_sleeper(wq))
Eric Dumazet67426b72010-10-29 20:44:44 +0000444 wake_up_interruptible_sync_poll(&wq->wait,
445 POLLOUT | POLLWRNORM | POLLWRBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +0800446 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447 }
Eric Dumazet43815482010-04-29 11:01:49 +0000448 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449}
450
451/* When dgram socket disconnects (or changes its peer), we clear its receive
452 * queue of packets arrived from previous peer. First, it allows to do
453 * flow control based only on wmem_alloc; second, sk connected to peer
454 * may receive messages only from that peer. */
455static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
456{
David S. Millerb03efcf2005-07-08 14:57:23 -0700457 if (!skb_queue_empty(&sk->sk_receive_queue)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 skb_queue_purge(&sk->sk_receive_queue);
459 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
460
461 /* If one link of bidirectional dgram pipe is disconnected,
462 * we signal error. Messages are lost. Do not make this,
463 * when peer was not connected to us.
464 */
465 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
466 other->sk_err = ECONNRESET;
467 other->sk_error_report(other);
468 }
469 }
470}
471
472static void unix_sock_destructor(struct sock *sk)
473{
474 struct unix_sock *u = unix_sk(sk);
475
476 skb_queue_purge(&sk->sk_receive_queue);
477
Ilpo Järvinen547b7922008-07-25 21:43:18 -0700478 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
479 WARN_ON(!sk_unhashed(sk));
480 WARN_ON(sk->sk_socket);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481 if (!sock_flag(sk, SOCK_DEAD)) {
wangweidong5cc208b2013-12-06 18:03:36 +0800482 pr_info("Attempt to release alive unix socket: %p\n", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483 return;
484 }
485
486 if (u->addr)
487 unix_release_addr(u->addr);
488
Eric Dumazet518de9b2010-10-26 14:22:44 -0700489 atomic_long_dec(&unix_nr_socks);
David S. Miller6f756a82008-11-23 17:34:03 -0800490 local_bh_disable();
Eric Dumazeta8076d82008-11-17 02:38:49 -0800491 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
David S. Miller6f756a82008-11-23 17:34:03 -0800492 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493#ifdef UNIX_REFCNT_DEBUG
wangweidong5cc208b2013-12-06 18:03:36 +0800494 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
Eric Dumazet518de9b2010-10-26 14:22:44 -0700495 atomic_long_read(&unix_nr_socks));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496#endif
497}
498
Paul Mooreded34e02013-03-25 03:18:33 +0000499static void unix_release_sock(struct sock *sk, int embrion)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500{
501 struct unix_sock *u = unix_sk(sk);
Al Viro40ffe672012-03-14 21:54:32 -0400502 struct path path;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 struct sock *skpair;
504 struct sk_buff *skb;
505 int state;
506
507 unix_remove_socket(sk);
508
509 /* Clear state */
David S. Miller1c92b4e2007-05-31 13:24:26 -0700510 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511 sock_orphan(sk);
512 sk->sk_shutdown = SHUTDOWN_MASK;
Al Viro40ffe672012-03-14 21:54:32 -0400513 path = u->path;
514 u->path.dentry = NULL;
515 u->path.mnt = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516 state = sk->sk_state;
517 sk->sk_state = TCP_CLOSE;
David S. Miller1c92b4e2007-05-31 13:24:26 -0700518 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519
520 wake_up_interruptible_all(&u->peer_wait);
521
Jianjun Konge27dfce2008-11-01 21:38:31 -0700522 skpair = unix_peer(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523
Jianjun Konge27dfce2008-11-01 21:38:31 -0700524 if (skpair != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
David S. Miller1c92b4e2007-05-31 13:24:26 -0700526 unix_state_lock(skpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 /* No more writes */
528 skpair->sk_shutdown = SHUTDOWN_MASK;
529 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
530 skpair->sk_err = ECONNRESET;
David S. Miller1c92b4e2007-05-31 13:24:26 -0700531 unix_state_unlock(skpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 skpair->sk_state_change(skpair);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +0800533 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 }
Rainer Weikusat5c77e262015-11-20 22:07:23 +0000535
536 unix_dgram_peer_wake_disconnect(sk, skpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 sock_put(skpair); /* It may now die */
538 unix_peer(sk) = NULL;
539 }
540
541 /* Try to flush out this socket. Throw out buffers at least */
542
543 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
Jianjun Konge27dfce2008-11-01 21:38:31 -0700544 if (state == TCP_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545 unix_release_sock(skb->sk, 1);
546 /* passed fds are erased in the kfree_skb hook */
547 kfree_skb(skb);
548 }
549
Al Viro40ffe672012-03-14 21:54:32 -0400550 if (path.dentry)
551 path_put(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552
553 sock_put(sk);
554
555 /* ---- Socket is dead now and most probably destroyed ---- */
556
557 /*
Alan Coxe04dae82012-09-17 00:52:41 +0000558 * Fixme: BSD difference: In BSD all sockets connected to us get
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559 * ECONNRESET and we die on the spot. In Linux we behave
560 * like files and pipes do and wait for the last
561 * dereference.
562 *
563 * Can't we simply set sock->err?
564 *
565 * What the above comment does talk about? --ANK(980817)
566 */
567
Pavel Emelyanov9305cfa2007-11-10 22:06:01 -0800568 if (unix_tot_inflight)
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +0900569 unix_gc(); /* Garbage collect fds */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570}
571
Eric W. Biederman109f6e32010-06-13 03:30:14 +0000572static void init_peercred(struct sock *sk)
573{
574 put_pid(sk->sk_peer_pid);
575 if (sk->sk_peer_cred)
576 put_cred(sk->sk_peer_cred);
577 sk->sk_peer_pid = get_pid(task_tgid(current));
578 sk->sk_peer_cred = get_current_cred();
579}
580
581static void copy_peercred(struct sock *sk, struct sock *peersk)
582{
583 put_pid(sk->sk_peer_pid);
584 if (sk->sk_peer_cred)
585 put_cred(sk->sk_peer_cred);
586 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
587 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
588}
589
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590static int unix_listen(struct socket *sock, int backlog)
591{
592 int err;
593 struct sock *sk = sock->sk;
594 struct unix_sock *u = unix_sk(sk);
Eric W. Biederman109f6e32010-06-13 03:30:14 +0000595 struct pid *old_pid = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596
597 err = -EOPNOTSUPP;
Eric Dumazet6eba6a32008-11-16 22:58:44 -0800598 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
599 goto out; /* Only stream/seqpacket sockets accept */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600 err = -EINVAL;
601 if (!u->addr)
Eric Dumazet6eba6a32008-11-16 22:58:44 -0800602 goto out; /* No listens on an unbound socket */
David S. Miller1c92b4e2007-05-31 13:24:26 -0700603 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
605 goto out_unlock;
606 if (backlog > sk->sk_max_ack_backlog)
607 wake_up_interruptible_all(&u->peer_wait);
608 sk->sk_max_ack_backlog = backlog;
609 sk->sk_state = TCP_LISTEN;
610 /* set credentials so connect can copy them */
Eric W. Biederman109f6e32010-06-13 03:30:14 +0000611 init_peercred(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 err = 0;
613
614out_unlock:
David S. Miller1c92b4e2007-05-31 13:24:26 -0700615 unix_state_unlock(sk);
Eric W. Biederman109f6e32010-06-13 03:30:14 +0000616 put_pid(old_pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617out:
618 return err;
619}
620
621static int unix_release(struct socket *);
622static int unix_bind(struct socket *, struct sockaddr *, int);
623static int unix_stream_connect(struct socket *, struct sockaddr *,
624 int addr_len, int flags);
625static int unix_socketpair(struct socket *, struct socket *);
626static int unix_accept(struct socket *, struct socket *, int);
627static int unix_getname(struct socket *, struct sockaddr *, int *, int);
628static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
Rainer Weikusatec0d2152008-06-27 19:34:18 -0700629static unsigned int unix_dgram_poll(struct file *, struct socket *,
630 poll_table *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631static int unix_ioctl(struct socket *, unsigned int, unsigned long);
632static int unix_shutdown(struct socket *, int);
Ying Xue1b784142015-03-02 15:37:48 +0800633static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
634static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
635static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
636static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637static int unix_dgram_connect(struct socket *, struct sockaddr *,
638 int, int);
Ying Xue1b784142015-03-02 15:37:48 +0800639static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
640static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
641 int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642
Sasha Levin12663bf2013-12-07 17:26:27 -0500643static int unix_set_peek_off(struct sock *sk, int val)
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000644{
645 struct unix_sock *u = unix_sk(sk);
646
Sasha Levin12663bf2013-12-07 17:26:27 -0500647 if (mutex_lock_interruptible(&u->readlock))
648 return -EINTR;
649
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000650 sk->sk_peek_off = val;
651 mutex_unlock(&u->readlock);
Sasha Levin12663bf2013-12-07 17:26:27 -0500652
653 return 0;
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000654}
655
656
Eric Dumazet90ddc4f2005-12-22 12:49:22 -0800657static const struct proto_ops unix_stream_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658 .family = PF_UNIX,
659 .owner = THIS_MODULE,
660 .release = unix_release,
661 .bind = unix_bind,
662 .connect = unix_stream_connect,
663 .socketpair = unix_socketpair,
664 .accept = unix_accept,
665 .getname = unix_getname,
666 .poll = unix_poll,
667 .ioctl = unix_ioctl,
668 .listen = unix_listen,
669 .shutdown = unix_shutdown,
670 .setsockopt = sock_no_setsockopt,
671 .getsockopt = sock_no_getsockopt,
672 .sendmsg = unix_stream_sendmsg,
673 .recvmsg = unix_stream_recvmsg,
674 .mmap = sock_no_mmap,
675 .sendpage = sock_no_sendpage,
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +0000676 .set_peek_off = unix_set_peek_off,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677};
678
Eric Dumazet90ddc4f2005-12-22 12:49:22 -0800679static const struct proto_ops unix_dgram_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680 .family = PF_UNIX,
681 .owner = THIS_MODULE,
682 .release = unix_release,
683 .bind = unix_bind,
684 .connect = unix_dgram_connect,
685 .socketpair = unix_socketpair,
686 .accept = sock_no_accept,
687 .getname = unix_getname,
Rainer Weikusatec0d2152008-06-27 19:34:18 -0700688 .poll = unix_dgram_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 .ioctl = unix_ioctl,
690 .listen = sock_no_listen,
691 .shutdown = unix_shutdown,
692 .setsockopt = sock_no_setsockopt,
693 .getsockopt = sock_no_getsockopt,
694 .sendmsg = unix_dgram_sendmsg,
695 .recvmsg = unix_dgram_recvmsg,
696 .mmap = sock_no_mmap,
697 .sendpage = sock_no_sendpage,
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000698 .set_peek_off = unix_set_peek_off,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699};
700
Eric Dumazet90ddc4f2005-12-22 12:49:22 -0800701static const struct proto_ops unix_seqpacket_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 .family = PF_UNIX,
703 .owner = THIS_MODULE,
704 .release = unix_release,
705 .bind = unix_bind,
706 .connect = unix_stream_connect,
707 .socketpair = unix_socketpair,
708 .accept = unix_accept,
709 .getname = unix_getname,
Rainer Weikusatec0d2152008-06-27 19:34:18 -0700710 .poll = unix_dgram_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 .ioctl = unix_ioctl,
712 .listen = unix_listen,
713 .shutdown = unix_shutdown,
714 .setsockopt = sock_no_setsockopt,
715 .getsockopt = sock_no_getsockopt,
716 .sendmsg = unix_seqpacket_sendmsg,
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +0000717 .recvmsg = unix_seqpacket_recvmsg,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 .mmap = sock_no_mmap,
719 .sendpage = sock_no_sendpage,
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000720 .set_peek_off = unix_set_peek_off,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721};
722
723static struct proto unix_proto = {
Eric Dumazet248969a2008-11-17 00:00:30 -0800724 .name = "UNIX",
725 .owner = THIS_MODULE,
Eric Dumazet248969a2008-11-17 00:00:30 -0800726 .obj_size = sizeof(struct unix_sock),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727};
728
Ingo Molnara09785a2006-07-03 00:25:12 -0700729/*
730 * AF_UNIX sockets do not interact with hardware, hence they
731 * dont trigger interrupts - so it's safe for them to have
732 * bh-unsafe locking for their sk_receive_queue.lock. Split off
733 * this special lock-class by reinitializing the spinlock key:
734 */
735static struct lock_class_key af_unix_sk_receive_queue_lock_key;
736
Eric Dumazet6eba6a32008-11-16 22:58:44 -0800737static struct sock *unix_create1(struct net *net, struct socket *sock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738{
739 struct sock *sk = NULL;
740 struct unix_sock *u;
741
Eric Dumazet518de9b2010-10-26 14:22:44 -0700742 atomic_long_inc(&unix_nr_socks);
743 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744 goto out;
745
Pavel Emelyanov6257ff22007-11-01 00:39:31 -0700746 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747 if (!sk)
748 goto out;
749
Eric Dumazet6eba6a32008-11-16 22:58:44 -0800750 sock_init_data(sock, sk);
Ingo Molnara09785a2006-07-03 00:25:12 -0700751 lockdep_set_class(&sk->sk_receive_queue.lock,
752 &af_unix_sk_receive_queue_lock_key);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753
754 sk->sk_write_space = unix_write_space;
Denis V. Luneva0a53c82007-12-11 04:19:17 -0800755 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756 sk->sk_destruct = unix_sock_destructor;
757 u = unix_sk(sk);
Al Viro40ffe672012-03-14 21:54:32 -0400758 u->path.dentry = NULL;
759 u->path.mnt = NULL;
Benjamin LaHaisefd19f322006-01-03 14:10:46 -0800760 spin_lock_init(&u->lock);
Al Viro516e0cc2008-07-26 00:39:17 -0400761 atomic_long_set(&u->inflight, 0);
Miklos Szeredi1fd05ba2007-07-11 14:22:39 -0700762 INIT_LIST_HEAD(&u->link);
Ingo Molnar57b47a52006-03-20 22:35:41 -0800763 mutex_init(&u->readlock); /* single task reading lock */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 init_waitqueue_head(&u->peer_wait);
Rainer Weikusat5c77e262015-11-20 22:07:23 +0000765 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
Eric Dumazet7123aaa2012-06-08 05:03:21 +0000766 unix_insert_socket(unix_sockets_unbound(sk), sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767out:
Pavel Emelyanov284b3272007-11-10 22:08:30 -0800768 if (sk == NULL)
Eric Dumazet518de9b2010-10-26 14:22:44 -0700769 atomic_long_dec(&unix_nr_socks);
Eric Dumazet920de802008-11-24 00:09:29 -0800770 else {
771 local_bh_disable();
Eric Dumazeta8076d82008-11-17 02:38:49 -0800772 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
Eric Dumazet920de802008-11-24 00:09:29 -0800773 local_bh_enable();
774 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 return sk;
776}
777
Eric Paris3f378b62009-11-05 22:18:14 -0800778static int unix_create(struct net *net, struct socket *sock, int protocol,
779 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780{
781 if (protocol && protocol != PF_UNIX)
782 return -EPROTONOSUPPORT;
783
784 sock->state = SS_UNCONNECTED;
785
786 switch (sock->type) {
787 case SOCK_STREAM:
788 sock->ops = &unix_stream_ops;
789 break;
790 /*
791 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
792 * nothing uses it.
793 */
794 case SOCK_RAW:
Jianjun Konge27dfce2008-11-01 21:38:31 -0700795 sock->type = SOCK_DGRAM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 case SOCK_DGRAM:
797 sock->ops = &unix_dgram_ops;
798 break;
799 case SOCK_SEQPACKET:
800 sock->ops = &unix_seqpacket_ops;
801 break;
802 default:
803 return -ESOCKTNOSUPPORT;
804 }
805
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -0700806 return unix_create1(net, sock) ? 0 : -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807}
808
/*
 * close(2) handler: tear down the sock (unix_release_sock does the
 * real work) and detach it from the socket.  Always returns 0.
 */
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}
821
/*
 * Bind an unbound socket to an autogenerated abstract name of the
 * form "\0XXXXX" (5 hex digits drawn from the global 'ordernum'
 * counter).  Called when sending/connecting without an explicit
 * bind.  Returns 0 (also if already bound), -EINTR, -ENOMEM or
 * -ENOSPC when the 20-bit name space is exhausted.
 */
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	/* readlock serializes bind/autobind against each other. */
	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	/* room for sun_family + "\0XXXXX" + NUL */
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	/* Leading NUL (from kzalloc) marks the abstract namespace. */
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take long time if many names
		 * are already in use.
		 */
		cond_resched();
		/* Give up if all names seems to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	/* Move the sock from the unbound chain to its hash bucket. */
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
882
Denis V. Lunev097e66c2007-11-19 22:29:30 -0800883static struct sock *unix_find_other(struct net *net,
884 struct sockaddr_un *sunname, int len,
Eric Dumazet95c96172012-04-15 05:58:06 +0000885 int type, unsigned int hash, int *error)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700886{
887 struct sock *u;
Al Viro421748e2008-08-02 01:04:36 -0400888 struct path path;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 int err = 0;
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +0900890
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 if (sunname->sun_path[0]) {
Al Viro421748e2008-08-02 01:04:36 -0400892 struct inode *inode;
893 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 if (err)
895 goto fail;
David Howellsa25b3762015-03-17 22:26:21 +0000896 inode = d_backing_inode(path.dentry);
Al Viro421748e2008-08-02 01:04:36 -0400897 err = inode_permission(inode, MAY_WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898 if (err)
899 goto put_fail;
900
901 err = -ECONNREFUSED;
Al Viro421748e2008-08-02 01:04:36 -0400902 if (!S_ISSOCK(inode->i_mode))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 goto put_fail;
Eric W. Biederman6616f782010-06-13 03:35:48 +0000904 u = unix_find_socket_byinode(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 if (!u)
906 goto put_fail;
907
908 if (u->sk_type == type)
Al Viro68ac1232012-03-15 08:21:57 -0400909 touch_atime(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910
Al Viro421748e2008-08-02 01:04:36 -0400911 path_put(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912
Jianjun Konge27dfce2008-11-01 21:38:31 -0700913 err = -EPROTOTYPE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 if (u->sk_type != type) {
915 sock_put(u);
916 goto fail;
917 }
918 } else {
919 err = -ECONNREFUSED;
Jianjun Konge27dfce2008-11-01 21:38:31 -0700920 u = unix_find_socket_byname(net, sunname, len, type, hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700921 if (u) {
922 struct dentry *dentry;
Al Viro40ffe672012-03-14 21:54:32 -0400923 dentry = unix_sk(u)->path.dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924 if (dentry)
Al Viro68ac1232012-03-15 08:21:57 -0400925 touch_atime(&unix_sk(u)->path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926 } else
927 goto fail;
928 }
929 return u;
930
931put_fail:
Al Viro421748e2008-08-02 01:04:36 -0400932 path_put(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933fail:
Jianjun Konge27dfce2008-11-01 21:38:31 -0700934 *error = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935 return NULL;
936}
937
Al Virofaf02012012-07-20 02:37:29 +0400938static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
939{
940 struct dentry *dentry;
941 struct path path;
942 int err = 0;
943 /*
944 * Get the parent directory, calculate the hash for last
945 * component.
946 */
947 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
948 err = PTR_ERR(dentry);
949 if (IS_ERR(dentry))
950 return err;
951
952 /*
953 * All right, let's create it.
954 */
955 err = security_path_mknod(&path, dentry, mode, 0);
956 if (!err) {
David Howellsee8ac4d2015-03-06 14:05:26 +0000957 err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
Al Virofaf02012012-07-20 02:37:29 +0400958 if (!err) {
959 res->mnt = mntget(path.mnt);
960 res->dentry = dget(dentry);
961 }
962 }
963 done_path_create(&path, dentry);
964 return err;
965}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966
/*
 * bind(2) handler.
 *
 * A bare sun_family (addr_len == sizeof(short)) requests autobind.
 * Otherwise the validated name is either created on the filesystem
 * via unix_mknod() (pathname sockets) or checked for uniqueness in
 * the abstract namespace, and the sock is rehashed into the bucket
 * matching its new address.  Returns 0 or a negative errno
 * (-EADDRINUSE when the name is taken).
 */
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		/* No name supplied: pick an abstract one automatically. */
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	/* readlock serializes against concurrent bind/autobind. */
	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		goto out;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		/* Pathname sockets hash by inode number, not by name. */
		addr->hash = UNIX_HASH_SIZE;
		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		/* Abstract names must be unique per net/type. */
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}
1051
David S. Miller278a3de2007-05-31 15:19:20 -07001052static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1053{
1054 if (unlikely(sk1 == sk2) || !sk2) {
1055 unix_state_lock(sk1);
1056 return;
1057 }
1058 if (sk1 < sk2) {
1059 unix_state_lock(sk1);
1060 unix_state_lock_nested(sk2);
1061 } else {
1062 unix_state_lock(sk2);
1063 unix_state_lock_nested(sk1);
1064 }
1065}
1066
/*
 * Release the locks taken by unix_state_double_lock(): sk1 always,
 * sk2 only when it is a distinct, non-NULL sock.
 */
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	unix_state_unlock(sk1);

	if (sk2 && likely(sk1 != sk2))
		unix_state_unlock(sk2);
}
1076
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1078 int alen, int flags)
1079{
1080 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001081 struct net *net = sock_net(sk);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001082 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083 struct sock *other;
Eric Dumazet95c96172012-04-15 05:58:06 +00001084 unsigned int hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 int err;
1086
1087 if (addr->sa_family != AF_UNSPEC) {
1088 err = unix_mkname(sunaddr, alen, &hash);
1089 if (err < 0)
1090 goto out;
1091 alen = err;
1092
1093 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1094 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1095 goto out;
1096
David S. Miller278a3de2007-05-31 15:19:20 -07001097restart:
Jianjun Konge27dfce2008-11-01 21:38:31 -07001098 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 if (!other)
1100 goto out;
1101
David S. Miller278a3de2007-05-31 15:19:20 -07001102 unix_state_double_lock(sk, other);
1103
1104 /* Apparently VFS overslept socket death. Retry. */
1105 if (sock_flag(other, SOCK_DEAD)) {
1106 unix_state_double_unlock(sk, other);
1107 sock_put(other);
1108 goto restart;
1109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
1111 err = -EPERM;
1112 if (!unix_may_send(sk, other))
1113 goto out_unlock;
1114
1115 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1116 if (err)
1117 goto out_unlock;
1118
1119 } else {
1120 /*
1121 * 1003.1g breaking connected state with AF_UNSPEC
1122 */
1123 other = NULL;
David S. Miller278a3de2007-05-31 15:19:20 -07001124 unix_state_double_lock(sk, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 }
1126
1127 /*
1128 * If it was connected, reconnect.
1129 */
1130 if (unix_peer(sk)) {
1131 struct sock *old_peer = unix_peer(sk);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001132 unix_peer(sk) = other;
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001133 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1134
David S. Miller278a3de2007-05-31 15:19:20 -07001135 unix_state_double_unlock(sk, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136
1137 if (other != old_peer)
1138 unix_dgram_disconnected(sk, old_peer);
1139 sock_put(old_peer);
1140 } else {
Jianjun Konge27dfce2008-11-01 21:38:31 -07001141 unix_peer(sk) = other;
David S. Miller278a3de2007-05-31 15:19:20 -07001142 unix_state_double_unlock(sk, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 }
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001144 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145
1146out_unlock:
David S. Miller278a3de2007-05-31 15:19:20 -07001147 unix_state_double_unlock(sk, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 sock_put(other);
1149out:
1150 return err;
1151}
1152
/*
 * Sleep on the peer's wait queue until its receive queue may have
 * room again (or it dies / shuts down, or the timeout expires).
 * Called with other's state lock held; the lock is dropped before
 * sleeping and NOT retaken.  Returns the remaining timeout.
 */
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	/* Only worth sleeping if the peer is alive, readable and full;
	 * checked under the state lock to avoid missing a wakeup. */
	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
1173
/*
 * connect(2) for SOCK_STREAM/SOCK_SEQPACKET.
 *
 * Allocates the embryo sock (newsk) and the notification skb before
 * taking any locks, finds the listener, and queues the embryo on the
 * listener's receive queue, from where unix_accept() picks it up.
 * Returns 0 or a negative errno.
 */
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	/* Passing credentials needs a bound source address. */
	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		/* Listener backlog full: fail for nonblocking sockets,
		 * otherwise wait and retry the whole lookup. */
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   It is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because deadlock is
	   possible. Connect to self case and simultaneous
	   attempt to connect are eliminated by checking socket
	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
	   check this before attempt to grab lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	/* State changed while we were unlocked: start over. */
	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock*/
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* queue the embryo and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
1348
1349static int unix_socketpair(struct socket *socka, struct socket *sockb)
1350{
Jianjun Konge27dfce2008-11-01 21:38:31 -07001351 struct sock *ska = socka->sk, *skb = sockb->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
1353 /* Join our sockets back to back */
1354 sock_hold(ska);
1355 sock_hold(skb);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001356 unix_peer(ska) = skb;
1357 unix_peer(skb) = ska;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001358 init_peercred(ska);
1359 init_peercred(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
1361 if (ska->sk_type != SOCK_DGRAM) {
1362 ska->sk_state = TCP_ESTABLISHED;
1363 skb->sk_state = TCP_ESTABLISHED;
1364 socka->state = SS_CONNECTED;
1365 sockb->state = SS_CONNECTED;
1366 }
1367 return 0;
1368}
1369
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001370static void unix_sock_inherit_flags(const struct socket *old,
1371 struct socket *new)
1372{
1373 if (test_bit(SOCK_PASSCRED, &old->flags))
1374 set_bit(SOCK_PASSCRED, &new->flags);
1375 if (test_bit(SOCK_PASSSEC, &old->flags))
1376 set_bit(SOCK_PASSSEC, &new->flags);
1377}
1378
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1380{
1381 struct sock *sk = sock->sk;
1382 struct sock *tsk;
1383 struct sk_buff *skb;
1384 int err;
1385
1386 err = -EOPNOTSUPP;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001387 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 goto out;
1389
1390 err = -EINVAL;
1391 if (sk->sk_state != TCP_LISTEN)
1392 goto out;
1393
1394 /* If socket state is TCP_LISTEN it cannot change (for now...),
1395 * so that no locks are necessary.
1396 */
1397
1398 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1399 if (!skb) {
1400 /* This means receive shutdown. */
1401 if (err == 0)
1402 err = -EINVAL;
1403 goto out;
1404 }
1405
1406 tsk = skb->sk;
1407 skb_free_datagram(sk, skb);
1408 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1409
1410 /* attach accepted sock to socket */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001411 unix_state_lock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 newsock->state = SS_CONNECTED;
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001413 unix_sock_inherit_flags(sock, newsock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 sock_graft(tsk, newsock);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001415 unix_state_unlock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 return 0;
1417
1418out:
1419 return err;
1420}
1421
1422
1423static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1424{
1425 struct sock *sk = sock->sk;
1426 struct unix_sock *u;
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001427 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 int err = 0;
1429
1430 if (peer) {
1431 sk = unix_peer_get(sk);
1432
1433 err = -ENOTCONN;
1434 if (!sk)
1435 goto out;
1436 err = 0;
1437 } else {
1438 sock_hold(sk);
1439 }
1440
1441 u = unix_sk(sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001442 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 if (!u->addr) {
1444 sunaddr->sun_family = AF_UNIX;
1445 sunaddr->sun_path[0] = 0;
1446 *uaddr_len = sizeof(short);
1447 } else {
1448 struct unix_address *addr = u->addr;
1449
1450 *uaddr_len = addr->len;
1451 memcpy(sunaddr, addr->name, *uaddr_len);
1452 }
David S. Miller1c92b4e2007-05-31 13:24:26 -07001453 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 sock_put(sk);
1455out:
1456 return err;
1457}
1458
1459static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1460{
1461 int i;
1462
1463 scm->fp = UNIXCB(skb).fp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 UNIXCB(skb).fp = NULL;
1465
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001466 for (i = scm->fp->count-1; i >= 0; i--)
Hannes Frederic Sowa797c0092016-02-03 02:11:03 +01001467 unix_notinflight(scm->fp->user, scm->fp->fp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468}
1469
Eric W. Biederman7361c362010-06-13 03:34:33 +00001470static void unix_destruct_scm(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
1472 struct scm_cookie scm;
1473 memset(&scm, 0, sizeof(scm));
Eric W. Biederman7361c362010-06-13 03:34:33 +00001474 scm.pid = UNIXCB(skb).pid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001475 if (UNIXCB(skb).fp)
1476 unix_detach_fds(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478 /* Alas, it calls VFS */
1479 /* So fscking what? fput() had been SMP-safe since the last Summer */
1480 scm_destroy(&scm);
1481 sock_wfree(skb);
1482}
1483
willy tarreaudc6b0ec2016-01-10 07:54:56 +01001484/*
1485 * The "user->unix_inflight" variable is protected by the garbage
1486 * collection lock, and we just read it locklessly here. If you go
1487 * over the limit, there might be a tiny race in actually noticing
1488 * it across threads. Tough.
1489 */
1490static inline bool too_many_unix_fds(struct task_struct *p)
1491{
1492 struct user_struct *user = current_user();
1493
1494 if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
1495 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1496 return false;
1497}
1498
Eric Dumazet25888e32010-11-25 04:11:39 +00001499#define MAX_RECURSION_LEVEL 4
1500
Miklos Szeredi62093442008-11-09 15:23:57 +01001501static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502{
1503 int i;
Eric Dumazet25888e32010-11-25 04:11:39 +00001504 unsigned char max_level = 0;
1505 int unix_sock_count = 0;
1506
willy tarreaudc6b0ec2016-01-10 07:54:56 +01001507 if (too_many_unix_fds(current))
1508 return -ETOOMANYREFS;
1509
Eric Dumazet25888e32010-11-25 04:11:39 +00001510 for (i = scm->fp->count - 1; i >= 0; i--) {
1511 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1512
1513 if (sk) {
1514 unix_sock_count++;
1515 max_level = max(max_level,
1516 unix_sk(sk)->recursion_level);
1517 }
1518 }
1519 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1520 return -ETOOMANYREFS;
Miklos Szeredi62093442008-11-09 15:23:57 +01001521
1522 /*
1523 * Need to duplicate file references for the sake of garbage
1524 * collection. Otherwise a socket in the fps might become a
1525 * candidate for GC while the skb is not yet queued.
1526 */
1527 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1528 if (!UNIXCB(skb).fp)
1529 return -ENOMEM;
1530
willy tarreaudc6b0ec2016-01-10 07:54:56 +01001531 for (i = scm->fp->count - 1; i >= 0; i--)
Hannes Frederic Sowa797c0092016-02-03 02:11:03 +01001532 unix_inflight(scm->fp->user, scm->fp->fp[i]);
Eric Dumazet25888e32010-11-25 04:11:39 +00001533 return max_level;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534}
1535
David S. Millerf78a5fd2011-09-16 19:34:00 -04001536static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001537{
1538 int err = 0;
Eric Dumazet16e57262011-09-19 05:52:27 +00001539
David S. Millerf78a5fd2011-09-16 19:34:00 -04001540 UNIXCB(skb).pid = get_pid(scm->pid);
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001541 UNIXCB(skb).uid = scm->creds.uid;
1542 UNIXCB(skb).gid = scm->creds.gid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001543 UNIXCB(skb).fp = NULL;
1544 if (scm->fp && send_fds)
1545 err = unix_attach_fds(scm, skb);
1546
1547 skb->destructor = unix_destruct_scm;
1548 return err;
1549}
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551/*
Eric Dumazet16e57262011-09-19 05:52:27 +00001552 * Some apps rely on write() giving SCM_CREDENTIALS
1553 * We include credentials if source or destination socket
1554 * asserted SOCK_PASSCRED.
1555 */
1556static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1557 const struct sock *other)
1558{
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001559 if (UNIXCB(skb).pid)
Eric Dumazet16e57262011-09-19 05:52:27 +00001560 return;
1561 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
Eric W. Biederman25da0e32013-04-03 16:13:35 +00001562 !other->sk_socket ||
1563 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
Eric Dumazet16e57262011-09-19 05:52:27 +00001564 UNIXCB(skb).pid = get_pid(task_tgid(current));
David S. Miller6e0895c2013-04-22 20:32:51 -04001565 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
Eric Dumazet16e57262011-09-19 05:52:27 +00001566 }
1567}
1568
1569/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 * Send AF_UNIX data.
1571 */
1572
/*
 * Datagram sendmsg, used for SOCK_DGRAM and (via unix_seqpacket_sendmsg)
 * SOCK_SEQPACKET sockets.  Returns @len on success or a negative errno.
 */
static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			      size_t len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned int hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie scm;
	int max_level;
	int data_len = 0;
	int sk_locked;

	wait_for_unix_gc();
	/* Capture ancillary data (credentials, fds) from the message. */
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		/* Explicit destination address supplied by the caller. */
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		/* No address: require a connected peer. */
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	/* Credential passing requires a bound address; autobind if needed. */
	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	/* Large datagrams go partly into page frags (paged skb). */
	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	/* On success err holds the fd recursion depth from unix_attach_fds. */
	err = unix_scm_to_skb(&scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(&scm, skb);

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	/* Resolve the destination socket for addressed sends (and after
	 * a dead peer was dropped below).
	 */
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	sk_locked = 0;
	unix_state_lock(other);
restart_locked:
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (unlikely(sock_flag(other, SOCK_DEAD))) {
		/*
		 * Check with 1003.1g - what should
		 * datagram error
		 */
		unix_state_unlock(other);
		sock_put(other);

		if (!sk_locked)
			unix_state_lock(sk);

		err = 0;
		if (unix_peer(sk) == other) {
			/* Our connected peer died: break the link and
			 * report ECONNREFUSED.
			 */
			unix_peer(sk) = NULL;
			unix_dgram_peer_wake_disconnect_wakeup(sk, other);

			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		/* For addressed sends, retry the lookup from scratch. */
		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	/* Receiver queue is full and we are not its connected peer:
	 * either block waiting for space, or register for a wakeup via
	 * unix_dgram_peer_wake_me() and fail with -EAGAIN.
	 */
	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
		if (timeo) {
			timeo = unix_wait_for_peer(other, timeo);

			err = sock_intr_errno(timeo);
			if (signal_pending(current))
				goto out_free;

			goto restart;
		}

		if (!sk_locked) {
			/* Need both locks; take them in canonical order. */
			unix_state_unlock(other);
			unix_state_double_lock(sk, other);
		}

		if (unix_peer(sk) != other ||
		    unix_dgram_peer_wake_me(sk, other)) {
			err = -EAGAIN;
			sk_locked = 1;
			goto out_unlock;
		}

		if (!sk_locked) {
			sk_locked = 1;
			goto restart_locked;
		}
	}

	if (unlikely(sk_locked))
		unix_state_unlock(sk);

	/* Deliver: queue the skb on the receiver and wake it. */
	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	scm_destroy(&scm);
	return len;

out_unlock:
	if (sk_locked)
		unix_state_unlock(sk);
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(&scm);
	return err;
}
1773
Eric Dumazete370a722013-08-08 14:37:32 -07001774/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
1776 */
1777#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001778
/*
 * Stream (SOCK_STREAM) sendmsg: chop @len bytes into paged skbs and
 * queue them on the connected peer.  Returns the number of bytes sent,
 * or a negative errno only if nothing was sent.
 */
static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie scm;
	bool fds_sent = false;
	int max_level;
	int data_len;

	wait_for_unix_gc();
	/* Capture ancillary data (credentials, fds) from the message. */
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		/* Stream sockets reject explicit destination addresses. */
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		/* Everything beyond the linear head goes into page frags. */
		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(&scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		/* err is the fd recursion depth reported by unix_attach_fds. */
		max_level = err + 1;
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		/* Peer vanished or stopped reading: raise EPIPE/SIGPIPE. */
		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

	scm_destroy(&scm);

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	/* Partial writes succeed; only a zero-byte send reports EPIPE. */
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(&scm);
	return sent ? : err;
}
1881
Ying Xue1b784142015-03-02 15:37:48 +08001882static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1883 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884{
1885 int err;
1886 struct sock *sk = sock->sk;
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001887
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 err = sock_error(sk);
1889 if (err)
1890 return err;
1891
1892 if (sk->sk_state != TCP_ESTABLISHED)
1893 return -ENOTCONN;
1894
1895 if (msg->msg_namelen)
1896 msg->msg_namelen = 0;
1897
Ying Xue1b784142015-03-02 15:37:48 +08001898 return unix_dgram_sendmsg(sock, msg, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899}
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001900
Ying Xue1b784142015-03-02 15:37:48 +08001901static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
1902 size_t size, int flags)
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001903{
1904 struct sock *sk = sock->sk;
1905
1906 if (sk->sk_state != TCP_ESTABLISHED)
1907 return -ENOTCONN;
1908
Ying Xue1b784142015-03-02 15:37:48 +08001909 return unix_dgram_recvmsg(sock, msg, size, flags);
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001910}
1911
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1913{
1914 struct unix_sock *u = unix_sk(sk);
1915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 if (u->addr) {
1917 msg->msg_namelen = u->addr->len;
1918 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1919 }
1920}
1921
/*
 * Receive one datagram.  Returns the number of bytes copied (or the
 * full message length when MSG_TRUNC is set) or a negative errno.
 */
static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			      size_t size, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	/* Serialize readers on this socket. */
	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* We dequeued a message, so senders waiting for space can retry. */
	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	/* Clamp to the remaining payload; flag truncation otherwise. */
	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	/* Build the ancillary data (credentials, security, fds) to return. */
	memset(&scm, 0, sizeof(scm));

	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(&scm, skb);

	if (!(flags & MSG_PEEK)) {
		/* Consuming read: take ownership of any passed fds. */
		if (UNIXCB(skb).fp)
			unix_detach_fds(&scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!

		*/

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, &scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
2017
2018/*
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002019 * Sleep until more data has arrived. But check for races..
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last)
{
	/* Sleep until the tail of the receive queue is no longer @last
	 * (new data arrived), or an error, shutdown, pending signal or
	 * timeout expiry ends the wait.  Takes the unix state lock
	 * itself, so the caller must not hold it.  Returns the
	 * remaining timeout.
	 */
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (skb_peek_tail(&sk->sk_receive_queue) != last ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		/* Advertise that we are waiting for data and drop the
		 * state lock across the (freezable) sleep.
		 */
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = freezable_schedule_timeout(timeo);
		unix_state_lock(sk);

		/* Socket torn down while we slept: stop waiting. */
		if (sock_flag(sk, SOCK_DEAD))
			break;

		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
2053
Eric Dumazete370a722013-08-08 14:37:32 -07002054static unsigned int unix_skb_len(const struct sk_buff *skb)
2055{
2056 return skb->len - UNIXCB(skb).consumed;
2057}
2058
/*
 * Stream (SOCK_STREAM/SOCK_SEQPACKET) recvmsg: glue together the
 * payload of consecutive skbs written by the same sender until @size
 * bytes are copied, the queue runs dry, or passed fds/credential
 * changes force a stop.  Returns bytes copied or a negative errno.
 */
static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	int copied = 0;
	int noblock = flags & MSG_DONTWAIT;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;
	int skip;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	/* target = minimum byte count before we may return (SO_RCVLOWAT). */
	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */

	memset(&scm, 0, sizeof(scm));

	mutex_lock(&u->readlock);

	/* Peek offset only applies to non-consuming reads. */
	if (flags & MSG_PEEK)
		skip = sk_peek_offset(sk, flags);
	else
		skip = 0;

	do {
		int chunk;
		struct sk_buff *skb, *last;

		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
again:
		if (skb == NULL) {
			/* Queue empty: return if we have enough, otherwise
			 * report errors/shutdown or wait for more data.
			 */
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			/* Drop the reader lock while sleeping so writers
			 * and other readers can make progress.
			 */
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo, last);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				scm_destroy(&scm);
				goto out;
			}

			mutex_lock(&u->readlock);
			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}

		/* Skip over fully-peeked skbs to reach the read offset. */
		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid != scm.pid) ||
			    !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
			    !gid_eq(UNIXCB(skb).gid, scm.creds.gid))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
					  msg, chunk)) {
			/* Fault copying to userspace; report it only if
			 * nothing was copied yet.
			 */
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(&scm, skb);

			/* Partially consumed skb stays at the head. */
			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			/* Passed fds must be delivered with this read. */
			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				scm.fp = scm_fp_dup(UNIXCB(skb).fp);

			sk_peek_offset_fwd(sk, chunk);

			if (UNIXCB(skb).fp)
				break;

			skip = 0;
			last = skb;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, &scm, flags);
out:
	return copied ? : err;
}
2227
/*
 * shutdown(2) for AF_UNIX: record the shutdown mode on this socket
 * and, for connection-oriented types, mirror the complementary mode
 * on the peer and wake anybody waiting there.
 */
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	/* Hold a reference on the peer so it cannot disappear after we
	 * drop our own state lock.
	 */
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		/* A local receive shutdown stops the peer sending, and
		 * vice versa.
		 */
		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
2273
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002274long unix_inq_len(struct sock *sk)
2275{
2276 struct sk_buff *skb;
2277 long amount = 0;
2278
2279 if (sk->sk_state == TCP_LISTEN)
2280 return -EINVAL;
2281
2282 spin_lock(&sk->sk_receive_queue.lock);
2283 if (sk->sk_type == SOCK_STREAM ||
2284 sk->sk_type == SOCK_SEQPACKET) {
2285 skb_queue_walk(&sk->sk_receive_queue, skb)
Eric Dumazete370a722013-08-08 14:37:32 -07002286 amount += unix_skb_len(skb);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002287 } else {
2288 skb = skb_peek(&sk->sk_receive_queue);
2289 if (skb)
2290 amount = skb->len;
2291 }
2292 spin_unlock(&sk->sk_receive_queue.lock);
2293
2294 return amount;
2295}
2296EXPORT_SYMBOL_GPL(unix_inq_len);
2297
/* Number of bytes queued for sending and not yet consumed (SIOCOUTQ). */
long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
2303
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2305{
2306 struct sock *sk = sock->sk;
Jianjun Konge27dfce2008-11-01 21:38:31 -07002307 long amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 int err;
2309
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002310 switch (cmd) {
2311 case SIOCOUTQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002312 amount = unix_outq_len(sk);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002313 err = put_user(amount, (int __user *)arg);
2314 break;
2315 case SIOCINQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002316 amount = unix_inq_len(sk);
2317 if (amount < 0)
2318 err = amount;
2319 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 err = put_user(amount, (int __user *)arg);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002321 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002322 default:
2323 err = -ENOIOCTLCMD;
2324 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 }
2326 return err;
2327}
2328
/* poll() for stream/seqpacket sockets.  Builds the event mask from this
 * socket's own state only; compare unix_dgram_poll(), which must also
 * inspect the peer's receive queue for writability.
 */
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
2363
/* poll() for datagram sockets (and the connecting phase of seqpacket).
 *
 * Unlike unix_poll(), writability here can depend on a connected peer:
 * when the peer's receive queue is full the socket is not writable, and
 * this function registers the caller on the peer's wake-up list so it is
 * notified when space frees up.
 */
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		/* A connected, non-symmetric peer with a full receive queue
		 * blocks us: hook onto its wake list and report !writable.
		 */
		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
#ifdef CONFIG_PROC_FS

/* The /proc/net/unix iterator encodes its position in a single loff_t:
 * the high bits select the hash bucket, the low BUCKET_SPACE bits hold
 * the 1-based offset of the socket within that bucket.
 */
#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002428
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002429static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430{
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002431 unsigned long offset = get_offset(*pos);
2432 unsigned long bucket = get_bucket(*pos);
2433 struct sock *sk;
2434 unsigned long count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002436 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2437 if (sock_net(sk) != seq_file_net(seq))
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002438 continue;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002439 if (++count == offset)
2440 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 }
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002442
2443 return sk;
2444}
2445
/* Advance the /proc/net/unix iterator.
 *
 * First continue within the current bucket, skipping sockets from other
 * network namespaces; when the bucket is exhausted, fall through to the
 * first matching socket of each following bucket, rewriting *pos to the
 * new bucket/offset encoding.  Returns NULL once all buckets are done.
 * Note the goto deliberately jumps into the do-while body to share the
 * bucket-advance step.
 */
static struct sock *unix_next_socket(struct seq_file *seq,
				     struct sock *sk,
				     loff_t *pos)
{
	unsigned long bucket;

	while (sk > (struct sock *)SEQ_START_TOKEN) {
		sk = sk_next(sk);
		if (!sk)
			goto next_bucket;	/* current bucket exhausted */
		if (sock_net(sk) == seq_file_net(seq))
			return sk;
	}

	do {
		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

next_bucket:
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);	/* offsets are 1-based */
	} while (bucket < ARRAY_SIZE(unix_socket_table));

	return NULL;
}
2472
/* seq_file start: take the global table lock for the duration of the walk
 * (released in unix_seq_stop).  Position 0 yields the header line token.
 */
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}
2486
/* seq_file next: step to the following socket, updating *pos. */
static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}
2492
/* seq_file stop: drop the lock taken in unix_seq_start(). */
static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}
2498
/* Emit one line of /proc/net/unix: socket pointer, refcount, protocol
 * (always 0 here), flags, type, pseudo-state, inode, and - if bound -
 * the socket name.  The output format is userspace-visible ABI.
 */
static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;	/* presumably drops a trailing NUL of pathname addrs - verify vs unix_bind */
			else {
				/* Abstract names start with a NUL byte;
				 * print '@' in its place and skip it.
				 */
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
2542
/* Iterator callbacks backing /proc/net/unix. */
static const struct seq_operations unix_seq_ops = {
	.start = unix_seq_start,
	.next = unix_seq_next,
	.stop = unix_seq_stop,
	.show = unix_seq_show,
};
2549
/* Open /proc/net/unix: per-netns seq_file with private iterator state. */
static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct seq_net_private));
}
2555
/* File operations for /proc/net/unix. */
static const struct file_operations unix_seq_fops = {
	.owner = THIS_MODULE,
	.open = unix_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
2563
2564#endif
2565
/* Registration entry for the PF_UNIX address family (see af_unix_init). */
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner = THIS_MODULE,
};
2571
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002572
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002573static int __net_init unix_net_init(struct net *net)
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002574{
2575 int error = -ENOMEM;
2576
Denis V. Luneva0a53c82007-12-11 04:19:17 -08002577 net->unx.sysctl_max_dgram_qlen = 10;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002578 if (unix_sysctl_register(net))
2579 goto out;
Pavel Emelyanovd392e492007-12-01 23:44:15 +11002580
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002581#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00002582 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002583 unix_sysctl_unregister(net);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002584 goto out;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002585 }
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002586#endif
2587 error = 0;
2588out:
Jianjun Kong48dcc33e2008-11-01 21:37:27 -07002589 return error;
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002590}
2591
/* Per-netns teardown: mirror of unix_net_init(). */
static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}
2597
/* Per-network-namespace init/exit hooks. */
static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603static int __init af_unix_init(void)
2604{
2605 int rc = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606
YOSHIFUJI Hideaki / 吉藤英明b4fff5f2013-01-09 07:20:07 +00002607 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608
2609 rc = proto_register(&unix_proto, 1);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002610 if (rc != 0) {
wangweidong5cc208b2013-12-06 18:03:36 +08002611 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 goto out;
2613 }
2614
2615 sock_register(&unix_family_ops);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002616 register_pernet_subsys(&unix_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617out:
2618 return rc;
2619}
2620
/* Module exit: unwind af_unix_init() registrations. */
static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}
2627
/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket.  But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);