blob: 03da879008d71bd75f50251bd813ecb36f3d321f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
Alan Cox113aa832008-10-13 19:01:08 -07004 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
 *	Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by above two patches.
 *	Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
82
wangweidong5cc208b2013-12-06 18:03:36 +080083#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
84
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070086#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070087#include <linux/signal.h>
88#include <linux/sched.h>
89#include <linux/errno.h>
90#include <linux/string.h>
91#include <linux/stat.h>
92#include <linux/dcache.h>
93#include <linux/namei.h>
94#include <linux/socket.h>
95#include <linux/un.h>
96#include <linux/fcntl.h>
97#include <linux/termios.h>
98#include <linux/sockios.h>
99#include <linux/net.h>
100#include <linux/in.h>
101#include <linux/fs.h>
102#include <linux/slab.h>
103#include <asm/uaccess.h>
104#include <linux/skbuff.h>
105#include <linux/netdevice.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +0200106#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107#include <net/sock.h>
Arnaldo Carvalho de Meloc752f072005-08-09 20:08:28 -0700108#include <net/tcp_states.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109#include <net/af_unix.h>
110#include <linux/proc_fs.h>
111#include <linux/seq_file.h>
112#include <net/scm.h>
113#include <linux/init.h>
114#include <linux/poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#include <linux/rtnetlink.h>
116#include <linux/mount.h>
117#include <net/checksum.h>
118#include <linux/security.h>
Colin Cross2b15af62013-05-06 23:50:21 +0000119#include <linux/freezer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120
Eric Dumazet7123aaa2012-06-08 05:03:21 +0000121struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
Pavel Emelyanovfa7ff562011-12-15 02:44:03 +0000122EXPORT_SYMBOL_GPL(unix_socket_table);
123DEFINE_SPINLOCK(unix_table_lock);
124EXPORT_SYMBOL_GPL(unix_table_lock);
Eric Dumazet518de9b2010-10-26 14:22:44 -0700125static atomic_long_t unix_nr_socks;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127
Eric Dumazet7123aaa2012-06-08 05:03:21 +0000128static struct hlist_head *unix_sockets_unbound(void *addr)
129{
130 unsigned long hash = (unsigned long)addr;
131
132 hash ^= hash >> 16;
133 hash ^= hash >> 8;
134 hash %= UNIX_HASH_SIZE;
135 return &unix_socket_table[UNIX_HASH_SIZE + hash];
136}
137
138#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
#ifdef CONFIG_SECURITY_NETWORK
/* Copy the sender's LSM security id out of the scm_cookie into the skb,
 * so it travels with the datagram to the receiver. */
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

/* Restore the security id stashed by unix_get_secdata() into the
 * receive-side scm_cookie. */
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
/* Stubs when security networking is compiled out. */
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */
157
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158/*
159 * SMP locking strategy:
David S. Millerfbe9cc42005-12-13 23:26:29 -0800160 * hash table is protected with spinlock unix_table_lock
Stephen Hemminger663717f2010-02-18 14:12:06 -0800161 * each socket state is protected by separate spin lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 */
163
Eric Dumazet95c96172012-04-15 05:58:06 +0000164static inline unsigned int unix_hash_fold(__wsum n)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165{
Anton Blanchard0a134042014-03-05 14:29:58 +1100166 unsigned int hash = (__force unsigned int)csum_fold(n);
Eric Dumazet95c96172012-04-15 05:58:06 +0000167
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 hash ^= hash>>8;
169 return hash&(UNIX_HASH_SIZE-1);
170}
171
172#define unix_peer(sk) (unix_sk(sk)->peer)
173
/* True iff @osk considers @sk to be its peer. */
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return sk == unix_peer(osk);
}
178
179static inline int unix_may_send(struct sock *sk, struct sock *osk)
180{
Eric Dumazet6eba6a32008-11-16 22:58:44 -0800181 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182}
183
Rainer Weikusat3c734192008-06-17 22:28:05 -0700184static inline int unix_recvq_full(struct sock const *sk)
185{
186 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
187}
188
/* Return the peer of @s with a reference held, or NULL if unconnected.
 * The unix state lock makes the peer read and the sock_hold() atomic
 * with respect to a concurrent disconnect.  Caller must sock_put() the
 * returned sock.
 */
struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
202static inline void unix_release_addr(struct unix_address *addr)
203{
204 if (atomic_dec_and_test(&addr->refcnt))
205 kfree(addr);
206}
207
208/*
209 * Check unix socket name:
210 * - should be not zero length.
211 * - if started by not zero, should be NULL terminated (FS object)
212 * - if started by zero, it is abstract name.
213 */
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +0900214
/*
 * Validate a unix socket name and compute its hash:
 * - must not be zero length;
 * - if it starts with a non-NUL byte it is a filesystem path and gets
 *   NUL-terminated in place (no hash is produced for this case);
 * - if it starts with NUL it is an abstract name and *hashp is set.
 * Returns the canonical address length, or -EINVAL on malformed input.
 */
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	/* Abstract name: hash the whole binary name, family included. */
	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
237
/* Unhash @sk from its chain; caller holds unix_table_lock. */
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}
242
/* Hash @sk onto @list; caller holds unix_table_lock.  The socket must
 * not already be hashed. */
static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));

	sk_add_node(sk, list);
}
248
249static inline void unix_remove_socket(struct sock *sk)
250{
David S. Millerfbe9cc42005-12-13 23:26:29 -0800251 spin_lock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 __unix_remove_socket(sk);
David S. Millerfbe9cc42005-12-13 23:26:29 -0800253 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254}
255
256static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
257{
David S. Millerfbe9cc42005-12-13 23:26:29 -0800258 spin_lock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259 __unix_insert_socket(list, sk);
David S. Millerfbe9cc42005-12-13 23:26:29 -0800260 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261}
262
Denis V. Lunev097e66c2007-11-19 22:29:30 -0800263static struct sock *__unix_find_socket_byname(struct net *net,
264 struct sockaddr_un *sunname,
Eric Dumazet95c96172012-04-15 05:58:06 +0000265 int len, int type, unsigned int hash)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 struct sock *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
Sasha Levinb67bfe02013-02-27 17:06:00 -0800269 sk_for_each(s, &unix_socket_table[hash ^ type]) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270 struct unix_sock *u = unix_sk(s);
271
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +0900272 if (!net_eq(sock_net(s), net))
Denis V. Lunev097e66c2007-11-19 22:29:30 -0800273 continue;
274
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 if (u->addr->len == len &&
276 !memcmp(u->addr->name, sunname, len))
277 goto found;
278 }
279 s = NULL;
280found:
281 return s;
282}
283
/* Locked lookup wrapper: find a bound socket by name and, if found, take
 * a reference on it before dropping unix_table_lock, so the result stays
 * valid for the caller.  Returns NULL if no match.
 */
static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
298
Eric W. Biederman6616f782010-06-13 03:35:48 +0000299static struct sock *unix_find_socket_byinode(struct inode *i)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300{
301 struct sock *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
David S. Millerfbe9cc42005-12-13 23:26:29 -0800303 spin_lock(&unix_table_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800304 sk_for_each(s,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
Al Viro40ffe672012-03-14 21:54:32 -0400306 struct dentry *dentry = unix_sk(s)->path.dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
David Howellsa25b3762015-03-17 22:26:21 +0000308 if (dentry && d_backing_inode(dentry) == i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 sock_hold(s);
310 goto found;
311 }
312 }
313 s = NULL;
314found:
David S. Millerfbe9cc42005-12-13 23:26:29 -0800315 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 return s;
317}
318
Rainer Weikusat5c77e262015-11-20 22:07:23 +0000319/* Support code for asymmetrically connected dgram sockets
320 *
321 * If a datagram socket is connected to a socket not itself connected
322 * to the first socket (eg, /dev/log), clients may only enqueue more
323 * messages if the present receive queue of the server socket is not
324 * "too large". This means there's a second writeability condition
325 * poll and sendmsg need to test. The dgram recv code will do a wake
326 * up on the peer_wait wait queue of a socket upon reception of a
327 * datagram which needs to be propagated to sleeping would-be writers
328 * since these might not have sent anything so far. This can't be
329 * accomplished via poll_wait because the lifetime of the server
330 * socket might be less than that of its clients if these break their
331 * association with it or if the server socket is closed while clients
332 * are still connected to it and there's no way to inform "a polling
333 * implementation" that it should let go of a certain wait queue
334 *
335 * In order to propagate a wake up, a wait_queue_t of the client
336 * socket is enqueued on the peer_wait queue of the server socket
337 * whose wake function does a wake_up on the ordinary client socket
338 * wait queue. This connection is established whenever a write (or
339 * poll for write) hit the flow control condition and broken when the
340 * association to the server socket is dissolved or after a wake up
341 * was relayed.
342 */
343
/* Wake function installed on a server socket's peer_wait queue (see the
 * comment block above).  Runs with the peer_wait lock held: detaches the
 * relay entry (one-shot) and forwards the wake-up to the client socket's
 * own wait queue so a sleeping writer/poller gets notified.
 */
static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	/* One-shot: unhook ourselves from the server's peer_wait queue. */
	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key);

	return 0;
}
363
364static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
365{
366 struct unix_sock *u, *u_other;
367 int rc;
368
369 u = unix_sk(sk);
370 u_other = unix_sk(other);
371 rc = 0;
372 spin_lock(&u_other->peer_wait.lock);
373
374 if (!u->peer_wake.private) {
375 u->peer_wake.private = other;
376 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
377
378 rc = 1;
379 }
380
381 spin_unlock(&u_other->peer_wait.lock);
382 return rc;
383}
384
385static void unix_dgram_peer_wake_disconnect(struct sock *sk,
386 struct sock *other)
387{
388 struct unix_sock *u, *u_other;
389
390 u = unix_sk(sk);
391 u_other = unix_sk(other);
392 spin_lock(&u_other->peer_wait.lock);
393
394 if (u->peer_wake.private == other) {
395 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
396 u->peer_wake.private = NULL;
397 }
398
399 spin_unlock(&u_other->peer_wait.lock);
400}
401
402static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
403 struct sock *other)
404{
405 unix_dgram_peer_wake_disconnect(sk, other);
406 wake_up_interruptible_poll(sk_sleep(sk),
407 POLLOUT |
408 POLLWRNORM |
409 POLLWRBAND);
410}
411
412/* preconditions:
413 * - unix_peer(sk) == other
414 * - association is stable
415 */
/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 *
 * Returns 1 if the caller should sleep (peer's receive queue is full and
 * a relay entry is armed to wake us), 0 if it may proceed to send.  The
 * connect-then-recheck order avoids missing a wake-up that races with
 * the queue draining.
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected = unix_dgram_peer_wake_connect(sk, other);

	if (unix_recvq_full(other))
		return 1;

	/* Queue drained in the meantime: undo a link we just created. */
	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}
430
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431static inline int unix_writable(struct sock *sk)
432{
433 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
434}
435
/* sk->sk_write_space callback: wake POLLOUT sleepers and send SIGIO once
 * enough write memory has been released to make the socket writable
 * again.  sk_wq is accessed under rcu_read_lock because the socket may
 * be dissolving concurrently.
 */
static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
450
451/* When dgram socket disconnects (or changes its peer), we clear its receive
452 * queue of packets arrived from previous peer. First, it allows to do
453 * flow control based only on wmem_alloc; second, sk connected to peer
454 * may receive messages only from that peer. */
/* When dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from previous peer. First, it allows to do
 * flow control based only on wmem_alloc; second, sk connected to peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		/* Purged skbs freed write space on the old peer: wake any
		 * writers blocked on it. */
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of bidirectional dgram pipe is disconnected,
		 * we signal error. Messages are lost. Do not make this,
		 * when peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
471
/* sk->sk_destruct callback: final teardown once the last reference to
 * the sock is dropped.  Frees queued skbs and the bound address, and
 * maintains the global socket accounting.  Must only run on a socket
 * already marked SOCK_DEAD with no pending write memory.
 */
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	/* sock_prot_inuse_add() touches per-cpu state; keep BHs off. */
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}
498
/* Core of close(2) on an AF_UNIX socket.  Unhashes the socket, marks it
 * dead under its state lock, notifies/disconnects the peer, flushes the
 * receive queue (releasing embryo sockets on a dying listener), drops
 * the filesystem binding and the socket's own reference.  @embrion is
 * nonzero when tearing down a never-accepted embryo connection, in which
 * case the peer gets ECONNRESET unconditionally.
 */
static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	/* Detach the fs binding under the lock; path_put() later, outside. */
	path	     = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			/* Unread data (or an embryo) means an abortive close. */
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* A listener's queue holds embryo sockets, not data. */
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What the above comment does talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}
571
Eric W. Biederman109f6e32010-06-13 03:30:14 +0000572static void init_peercred(struct sock *sk)
573{
574 put_pid(sk->sk_peer_pid);
575 if (sk->sk_peer_cred)
576 put_cred(sk->sk_peer_cred);
577 sk->sk_peer_pid = get_pid(task_tgid(current));
578 sk->sk_peer_cred = get_current_cred();
579}
580
581static void copy_peercred(struct sock *sk, struct sock *peersk)
582{
583 put_pid(sk->sk_peer_pid);
584 if (sk->sk_peer_cred)
585 put_cred(sk->sk_peer_cred);
586 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
587 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
588}
589
/* listen(2) for AF_UNIX.  Only stream/seqpacket sockets that are already
 * bound may listen.  Raising the backlog wakes blocked connecters, who
 * may now fit.  Credentials are recorded so connect() can copy them.
 */
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	/* NOTE(review): old_pid is never reassigned; put_pid(NULL) below is
	 * a no-op — looks like a leftover from an earlier version. */
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}
620
621static int unix_release(struct socket *);
622static int unix_bind(struct socket *, struct sockaddr *, int);
623static int unix_stream_connect(struct socket *, struct sockaddr *,
624 int addr_len, int flags);
625static int unix_socketpair(struct socket *, struct socket *);
626static int unix_accept(struct socket *, struct socket *, int);
627static int unix_getname(struct socket *, struct sockaddr *, int *, int);
628static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
Rainer Weikusatec0d2152008-06-27 19:34:18 -0700629static unsigned int unix_dgram_poll(struct file *, struct socket *,
630 poll_table *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631static int unix_ioctl(struct socket *, unsigned int, unsigned long);
632static int unix_shutdown(struct socket *, int);
Ying Xue1b784142015-03-02 15:37:48 +0800633static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
634static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
635static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
636static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637static int unix_dgram_connect(struct socket *, struct sockaddr *,
638 int, int);
Ying Xue1b784142015-03-02 15:37:48 +0800639static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
640static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
641 int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642
Sasha Levin12663bf2013-12-07 17:26:27 -0500643static int unix_set_peek_off(struct sock *sk, int val)
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000644{
645 struct unix_sock *u = unix_sk(sk);
646
Sasha Levin12663bf2013-12-07 17:26:27 -0500647 if (mutex_lock_interruptible(&u->readlock))
648 return -EINTR;
649
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000650 sk->sk_peek_off = val;
651 mutex_unlock(&u->readlock);
Sasha Levin12663bf2013-12-07 17:26:27 -0500652
653 return 0;
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +0000654}
655
656
/* proto_ops for SOCK_STREAM unix sockets: connection-oriented byte
 * stream with accept/listen. */
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
678
/* proto_ops for SOCK_DGRAM unix sockets: connectionless datagrams, so
 * accept/listen are rejected and the dgram poll variant is used. */
static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
700
/* proto_ops for SOCK_SEQPACKET unix sockets: connection-oriented like
 * stream (same connect/accept/listen), but with datagram-style message
 * boundaries and the dgram poll variant. */
static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
722
/* The single struct proto shared by all AF_UNIX socket types; sizes the
 * per-socket allocation as struct unix_sock. */
static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};
728
Ingo Molnara09785a2006-07-03 00:25:12 -0700729/*
730 * AF_UNIX sockets do not interact with hardware, hence they
731 * dont trigger interrupts - so it's safe for them to have
732 * bh-unsafe locking for their sk_receive_queue.lock. Split off
733 * this special lock-class by reinitializing the spinlock key:
734 */
735static struct lock_class_key af_unix_sk_receive_queue_lock_key;
736
/*
 * Allocate and initialize one AF_UNIX sock.
 *
 * Returns the new sock, or NULL on failure (global socket limit hit
 * or sk_alloc() failed).  On success the sock is already placed on the
 * unbound hash list and counted in sock_prot_inuse.
 */
static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	/* Optimistic bump-then-check; the decrement in the failure path
	 * below undoes it.  A concurrent racer may briefly overshoot the
	 * 2*get_max_files() cap - tolerated.
	 */
	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	/* See comment above af_unix_sk_receive_queue_lock_key: give the
	 * receive queue its own (bh-unsafe) lockdep class.
	 */
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);	/* for the SCM_RIGHTS garbage collector */
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		/* sock_prot_inuse counters are per-cpu and expect bh off */
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}
777
Eric Paris3f378b62009-11-05 22:18:14 -0800778static int unix_create(struct net *net, struct socket *sock, int protocol,
779 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780{
781 if (protocol && protocol != PF_UNIX)
782 return -EPROTONOSUPPORT;
783
784 sock->state = SS_UNCONNECTED;
785
786 switch (sock->type) {
787 case SOCK_STREAM:
788 sock->ops = &unix_stream_ops;
789 break;
790 /*
791 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
792 * nothing uses it.
793 */
794 case SOCK_RAW:
Jianjun Konge27dfce2008-11-01 21:38:31 -0700795 sock->type = SOCK_DGRAM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 case SOCK_DGRAM:
797 sock->ops = &unix_dgram_ops;
798 break;
799 case SOCK_SEQPACKET:
800 sock->ops = &unix_seqpacket_ops;
801 break;
802 default:
803 return -ESOCKTNOSUPPORT;
804 }
805
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -0700806 return unix_create1(net, sock) ? 0 : -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807}
808
809static int unix_release(struct socket *sock)
810{
811 struct sock *sk = sock->sk;
812
813 if (!sk)
814 return 0;
815
Paul Mooreded34e02013-03-25 03:18:33 +0000816 unix_release_sock(sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817 sock->sk = NULL;
818
Paul Mooreded34e02013-03-25 03:18:33 +0000819 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820}
821
/*
 * Assign an automatic abstract-namespace address ("\0XXXXX", five hex
 * digits) to an unbound socket, as bind(2) with an empty address or a
 * send on an unbound SOCK_PASSCRED socket requires.
 *
 * Returns 0 on success (or if the socket already has an address),
 * -EINTR if interrupted waiting for u->readlock, -ENOMEM on allocation
 * failure, -ENOSPC if the whole 2^20 name space appears exhausted.
 */
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;	/* cross-socket cursor; advanced under unix_table_lock */
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;	/* already bound - nothing to do */

	err = -ENOMEM;
	/* sizeof(short) for sun_family, 16 bytes for "\0%05x" + slack */
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	/* Leading NUL (left by kzalloc) marks the abstract namespace */
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take long time if many names
		 * are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	/* Hash buckets are partitioned by socket type */
	addr->hash ^= sk->sk_type;

	/* Move the sock from the unbound list to its hash bucket */
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
882
/*
 * Resolve a peer address to a sock, taking a reference on it.
 *
 * Filesystem names (sun_path[0] != 0) are resolved through the VFS and
 * matched by inode; abstract names are looked up in the hash table.
 * On success returns the sock (refcounted by the lookup helpers); on
 * failure returns NULL and stores the errno in *error.
 */
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = d_backing_inode(path.dentry);
		/* Connecting requires write permission on the socket node */
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		/* Only touch atime on a genuine match */
		if (u->sk_type == type)
			touch_atime(&path);

		/* Path no longer needed; u holds its own reference */
		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
937
Rainer Weikusat272d4742016-01-03 18:56:38 +0000938static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
939 struct path *res)
Al Virofaf02012012-07-20 02:37:29 +0400940{
Rainer Weikusat272d4742016-01-03 18:56:38 +0000941 int err;
Al Virofaf02012012-07-20 02:37:29 +0400942
Rainer Weikusat272d4742016-01-03 18:56:38 +0000943 err = security_path_mknod(path, dentry, mode, 0);
Al Virofaf02012012-07-20 02:37:29 +0400944 if (!err) {
Rainer Weikusat272d4742016-01-03 18:56:38 +0000945 err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
Al Virofaf02012012-07-20 02:37:29 +0400946 if (!err) {
Rainer Weikusat272d4742016-01-03 18:56:38 +0000947 res->mnt = mntget(path->mnt);
Al Virofaf02012012-07-20 02:37:29 +0400948 res->dentry = dget(dentry);
949 }
950 }
Rainer Weikusat272d4742016-01-03 18:56:38 +0000951
Al Virofaf02012012-07-20 02:37:29 +0400952 return err;
953}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954
955static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
956{
957 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +0900958 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 struct unix_sock *u = unix_sk(sk);
Jianjun Konge27dfce2008-11-01 21:38:31 -0700960 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
Al Virodae6ad82011-06-26 11:50:15 -0400961 char *sun_path = sunaddr->sun_path;
Rainer Weikusat272d4742016-01-03 18:56:38 +0000962 int err, name_err;
Eric Dumazet95c96172012-04-15 05:58:06 +0000963 unsigned int hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964 struct unix_address *addr;
965 struct hlist_head *list;
Rainer Weikusat272d4742016-01-03 18:56:38 +0000966 struct path path;
967 struct dentry *dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968
969 err = -EINVAL;
970 if (sunaddr->sun_family != AF_UNIX)
971 goto out;
972
Jianjun Konge27dfce2008-11-01 21:38:31 -0700973 if (addr_len == sizeof(short)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 err = unix_autobind(sock);
975 goto out;
976 }
977
978 err = unix_mkname(sunaddr, addr_len, &hash);
979 if (err < 0)
980 goto out;
981 addr_len = err;
982
Rainer Weikusat272d4742016-01-03 18:56:38 +0000983 name_err = 0;
984 dentry = NULL;
985 if (sun_path[0]) {
986 /* Get the parent directory, calculate the hash for last
987 * component.
988 */
989 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
990
991 if (IS_ERR(dentry)) {
992 /* delay report until after 'already bound' check */
993 name_err = PTR_ERR(dentry);
994 dentry = NULL;
995 }
996 }
997
Sasha Levin37ab4fa2013-12-13 10:54:22 -0500998 err = mutex_lock_interruptible(&u->readlock);
999 if (err)
Rainer Weikusat272d4742016-01-03 18:56:38 +00001000 goto out_path;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001
1002 err = -EINVAL;
1003 if (u->addr)
1004 goto out_up;
1005
Rainer Weikusat272d4742016-01-03 18:56:38 +00001006 if (name_err) {
1007 err = name_err == -EEXIST ? -EADDRINUSE : name_err;
1008 goto out_up;
1009 }
1010
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011 err = -ENOMEM;
1012 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1013 if (!addr)
1014 goto out_up;
1015
1016 memcpy(addr->name, sunaddr, addr_len);
1017 addr->len = addr_len;
1018 addr->hash = hash ^ sk->sk_type;
1019 atomic_set(&addr->refcnt, 1);
1020
Rainer Weikusat272d4742016-01-03 18:56:38 +00001021 if (dentry) {
1022 struct path u_path;
Al Virofaf02012012-07-20 02:37:29 +04001023 umode_t mode = S_IFSOCK |
Al Viroce3b0f82009-03-29 19:08:22 -04001024 (SOCK_INODE(sock)->i_mode & ~current_umask());
Rainer Weikusat272d4742016-01-03 18:56:38 +00001025 err = unix_mknod(dentry, &path, mode, &u_path);
Al Virofaf02012012-07-20 02:37:29 +04001026 if (err) {
1027 if (err == -EEXIST)
1028 err = -EADDRINUSE;
1029 unix_release_addr(addr);
1030 goto out_up;
1031 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 addr->hash = UNIX_HASH_SIZE;
Rainer Weikusat272d4742016-01-03 18:56:38 +00001033 hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
Al Virofaf02012012-07-20 02:37:29 +04001034 spin_lock(&unix_table_lock);
Rainer Weikusat272d4742016-01-03 18:56:38 +00001035 u->path = u_path;
Al Virofaf02012012-07-20 02:37:29 +04001036 list = &unix_socket_table[hash];
1037 } else {
1038 spin_lock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039 err = -EADDRINUSE;
Denis V. Lunev097e66c2007-11-19 22:29:30 -08001040 if (__unix_find_socket_byname(net, sunaddr, addr_len,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 sk->sk_type, hash)) {
1042 unix_release_addr(addr);
1043 goto out_unlock;
1044 }
1045
1046 list = &unix_socket_table[addr->hash];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047 }
1048
1049 err = 0;
1050 __unix_remove_socket(sk);
1051 u->addr = addr;
1052 __unix_insert_socket(list, sk);
1053
1054out_unlock:
David S. Millerfbe9cc42005-12-13 23:26:29 -08001055 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056out_up:
Ingo Molnar57b47a52006-03-20 22:35:41 -08001057 mutex_unlock(&u->readlock);
Rainer Weikusat272d4742016-01-03 18:56:38 +00001058out_path:
1059 if (dentry)
1060 done_path_create(&path, dentry);
1061
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062out:
1063 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064}
1065
/*
 * Take the state locks of two socks in a globally consistent order
 * (lower address first) so concurrent double-locks cannot deadlock.
 * Tolerates sk2 == NULL or sk2 == sk1, in which case only sk1 is
 * locked.  Paired with unix_state_double_unlock().
 */
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	struct sock *first = sk1;
	struct sock *second = sk2;

	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}

	if (second < first) {
		first = sk2;
		second = sk1;
	}

	unix_state_lock(first);
	unix_state_lock_nested(second);
}
1080
/*
 * Release the locks taken by unix_state_double_lock().  Unlock order
 * does not matter, so no address comparison is needed here.
 */
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (!sk2 || unlikely(sk1 == sk2)) {
		unix_state_unlock(sk1);
	} else {
		unix_state_unlock(sk1);
		unix_state_unlock(sk2);
	}
}
1090
/*
 * connect(2) for SOCK_DGRAM: set (or, with AF_UNSPEC, clear) the
 * default peer of a datagram socket.
 *
 * Both socks' state locks are taken together via
 * unix_state_double_lock() so the peer switch is atomic with respect
 * to concurrent senders.  Returns 0 or a negative errno.
 */
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		/* SOCK_PASSCRED peers must be able to name us: autobind first */
		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		/* unix_find_other() returns a referenced sock or NULL+err */
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 * 1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;	/* transfers our ref on 'other' to the peer slot */
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);	/* drop the peer-slot ref we displaced */
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
1166
/*
 * Sleep until @other's receive queue may have drained, or the timeout
 * expires.
 *
 * Called with other's state lock held; ALWAYS returns with it
 * released (the lock is dropped before sleeping).  Registration on
 * other's peer_wait queue happens before re-checking the condition, so
 * a wakeup between the check and the sleep is not lost.  Returns the
 * remaining timeout.
 */
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	/* Only sleep if the reason we came here still holds */
	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
1187
/*
 * connect(2) for SOCK_STREAM/SOCK_SEQPACKET.
 *
 * Pre-allocates the server-side sock (newsk) and a notification skb,
 * then finds the listener, latches its state lock, and - after
 * re-validating everything - links the pair and queues the skb on the
 * listener so accept() can pick it up.  Any state change observed
 * between unlocked and locked phases restarts the whole attempt.
 *
 * Returns 0 on success or a negative errno (-EAGAIN when the backlog
 * is full on a non-blocking socket, -ECONNREFUSED, -EISCONN, ...).
 */
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	/* SOCK_PASSCRED peers must be able to name us: autobind first */
	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		/* Backlog full: fail immediately if non-blocking, else wait */
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		/* Drops other's state lock (always) before sleeping */
		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;	/* 'other' ref dropped at out: */
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   It is a tricky place. We need to grab our state lock and cannot
	   drop lock on peer. It is dangerous because deadlock is
	   possible. Connect to self case and simultaneous
	   attempt to connect are eliminated by checking socket
	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
	   check this before attempt to grab lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	/* Nested annotation: see the ordering argument in the comment above */
	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	/* Publish newsk only after its refcount bump is visible */
	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* queue the connection-announcing skb for the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
1362
1363static int unix_socketpair(struct socket *socka, struct socket *sockb)
1364{
Jianjun Konge27dfce2008-11-01 21:38:31 -07001365 struct sock *ska = socka->sk, *skb = sockb->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
1367 /* Join our sockets back to back */
1368 sock_hold(ska);
1369 sock_hold(skb);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001370 unix_peer(ska) = skb;
1371 unix_peer(skb) = ska;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001372 init_peercred(ska);
1373 init_peercred(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374
1375 if (ska->sk_type != SOCK_DGRAM) {
1376 ska->sk_state = TCP_ESTABLISHED;
1377 skb->sk_state = TCP_ESTABLISHED;
1378 socka->state = SS_CONNECTED;
1379 sockb->state = SS_CONNECTED;
1380 }
1381 return 0;
1382}
1383
/*
 * Propagate credential/security-passing flags from a listening socket
 * to a socket returned by accept(), so SCM_CREDENTIALS/SCM_SECURITY
 * behaviour carries over.
 */
static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}
1392
/*
 * accept(2): dequeue one pending connection (queued as an skb by
 * unix_stream_connect()), graft the embedded server-side sock onto
 * @newsock, and wake any connector waiting for backlog space.
 *
 * Returns 0 on success; -EOPNOTSUPP for datagram sockets, -EINVAL if
 * not listening or on receive shutdown, or an skb_recv_datagram error.
 */
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;	/* the sock pre-built by the connecting side */
	skb_free_datagram(sk, skb);
	/* A backlog slot freed up: let a blocked connector retry */
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
1435
1436
1437static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1438{
1439 struct sock *sk = sock->sk;
1440 struct unix_sock *u;
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001441 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 int err = 0;
1443
1444 if (peer) {
1445 sk = unix_peer_get(sk);
1446
1447 err = -ENOTCONN;
1448 if (!sk)
1449 goto out;
1450 err = 0;
1451 } else {
1452 sock_hold(sk);
1453 }
1454
1455 u = unix_sk(sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001456 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 if (!u->addr) {
1458 sunaddr->sun_family = AF_UNIX;
1459 sunaddr->sun_path[0] = 0;
1460 *uaddr_len = sizeof(short);
1461 } else {
1462 struct unix_address *addr = u->addr;
1463
1464 *uaddr_len = addr->len;
1465 memcpy(sunaddr, addr->name, *uaddr_len);
1466 }
David S. Miller1c92b4e2007-05-31 13:24:26 -07001467 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 sock_put(sk);
1469out:
1470 return err;
1471}
1472
1473static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1474{
1475 int i;
1476
1477 scm->fp = UNIXCB(skb).fp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 UNIXCB(skb).fp = NULL;
1479
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001480 for (i = scm->fp->count-1; i >= 0; i--)
Hannes Frederic Sowa797c0092016-02-03 02:11:03 +01001481 unix_notinflight(scm->fp->user, scm->fp->fp[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482}
1483
/*
 * skb destructor for AF_UNIX messages carrying SCM data: releases the
 * attached pid reference and any in-flight passed fds, then frees the
 * skb's write allocation.
 */
static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}
1497
willy tarreaudc6b0ec2016-01-10 07:54:56 +01001498/*
1499 * The "user->unix_inflight" variable is protected by the garbage
1500 * collection lock, and we just read it locklessly here. If you go
1501 * over the limit, there might be a tiny race in actually noticing
1502 * it across threads. Tough.
1503 */
1504static inline bool too_many_unix_fds(struct task_struct *p)
1505{
1506 struct user_struct *user = current_user();
1507
1508 if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
1509 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1510 return false;
1511}
1512
Eric Dumazet25888e32010-11-25 04:11:39 +00001513#define MAX_RECURSION_LEVEL 4
1514
Miklos Szeredi62093442008-11-09 15:23:57 +01001515static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516{
1517 int i;
Eric Dumazet25888e32010-11-25 04:11:39 +00001518 unsigned char max_level = 0;
1519 int unix_sock_count = 0;
1520
willy tarreaudc6b0ec2016-01-10 07:54:56 +01001521 if (too_many_unix_fds(current))
1522 return -ETOOMANYREFS;
1523
Eric Dumazet25888e32010-11-25 04:11:39 +00001524 for (i = scm->fp->count - 1; i >= 0; i--) {
1525 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1526
1527 if (sk) {
1528 unix_sock_count++;
1529 max_level = max(max_level,
1530 unix_sk(sk)->recursion_level);
1531 }
1532 }
1533 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1534 return -ETOOMANYREFS;
Miklos Szeredi62093442008-11-09 15:23:57 +01001535
1536 /*
1537 * Need to duplicate file references for the sake of garbage
1538 * collection. Otherwise a socket in the fps might become a
1539 * candidate for GC while the skb is not yet queued.
1540 */
1541 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1542 if (!UNIXCB(skb).fp)
1543 return -ENOMEM;
1544
willy tarreaudc6b0ec2016-01-10 07:54:56 +01001545 for (i = scm->fp->count - 1; i >= 0; i--)
Hannes Frederic Sowa797c0092016-02-03 02:11:03 +01001546 unix_inflight(scm->fp->user, scm->fp->fp[i]);
Eric Dumazet25888e32010-11-25 04:11:39 +00001547 return max_level;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548}
1549
David S. Millerf78a5fd2011-09-16 19:34:00 -04001550static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001551{
1552 int err = 0;
Eric Dumazet16e57262011-09-19 05:52:27 +00001553
David S. Millerf78a5fd2011-09-16 19:34:00 -04001554 UNIXCB(skb).pid = get_pid(scm->pid);
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001555 UNIXCB(skb).uid = scm->creds.uid;
1556 UNIXCB(skb).gid = scm->creds.gid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001557 UNIXCB(skb).fp = NULL;
1558 if (scm->fp && send_fds)
1559 err = unix_attach_fds(scm, skb);
1560
1561 skb->destructor = unix_destruct_scm;
1562 return err;
1563}
1564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565/*
Eric Dumazet16e57262011-09-19 05:52:27 +00001566 * Some apps rely on write() giving SCM_CREDENTIALS
1567 * We include credentials if source or destination socket
1568 * asserted SOCK_PASSCRED.
1569 */
1570static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1571 const struct sock *other)
1572{
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001573 if (UNIXCB(skb).pid)
Eric Dumazet16e57262011-09-19 05:52:27 +00001574 return;
1575 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
Eric W. Biederman25da0e32013-04-03 16:13:35 +00001576 !other->sk_socket ||
1577 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
Eric Dumazet16e57262011-09-19 05:52:27 +00001578 UNIXCB(skb).pid = get_pid(task_tgid(current));
David S. Miller6e0895c2013-04-22 20:32:51 -04001579 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
Eric Dumazet16e57262011-09-19 05:52:27 +00001580 }
1581}
1582
1583/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 * Send AF_UNIX data.
1585 */
1586
Ying Xue1b784142015-03-02 15:37:48 +08001587static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1588 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001591 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 struct unix_sock *u = unix_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01001593 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 struct sock *other = NULL;
1595 int namelen = 0; /* fake GCC */
1596 int err;
Eric Dumazet95c96172012-04-15 05:58:06 +00001597 unsigned int hash;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001598 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 long timeo;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001600 struct scm_cookie scm;
Eric Dumazet25888e32010-11-25 04:11:39 +00001601 int max_level;
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001602 int data_len = 0;
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001603 int sk_locked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
dann frazier5f23b732008-11-26 15:32:27 -08001605 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001606 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 if (err < 0)
1608 return err;
1609
1610 err = -EOPNOTSUPP;
1611 if (msg->msg_flags&MSG_OOB)
1612 goto out;
1613
1614 if (msg->msg_namelen) {
1615 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1616 if (err < 0)
1617 goto out;
1618 namelen = err;
1619 } else {
1620 sunaddr = NULL;
1621 err = -ENOTCONN;
1622 other = unix_peer_get(sk);
1623 if (!other)
1624 goto out;
1625 }
1626
Joe Perchesf64f9e72009-11-29 16:55:45 -08001627 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1628 && (err = unix_autobind(sock)) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 goto out;
1630
1631 err = -EMSGSIZE;
1632 if (len > sk->sk_sndbuf - 32)
1633 goto out;
1634
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001635 if (len > SKB_MAX_ALLOC) {
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001636 data_len = min_t(size_t,
1637 len - SKB_MAX_ALLOC,
1638 MAX_SKB_FRAGS * PAGE_SIZE);
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001639 data_len = PAGE_ALIGN(data_len);
1640
1641 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1642 }
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001643
1644 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001645 msg->msg_flags & MSG_DONTWAIT, &err,
1646 PAGE_ALLOC_COSTLY_ORDER);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001647 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 goto out;
1649
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001650 err = unix_scm_to_skb(&scm, skb, true);
Eric Dumazet25888e32010-11-25 04:11:39 +00001651 if (err < 0)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001652 goto out_free;
Eric Dumazet25888e32010-11-25 04:11:39 +00001653 max_level = err + 1;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001654 unix_get_secdata(&scm, skb);
Catherine Zhang877ce7c2006-06-29 12:27:47 -07001655
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001656 skb_put(skb, len - data_len);
1657 skb->data_len = data_len;
1658 skb->len = len;
Al Viroc0371da2014-11-24 10:42:55 -05001659 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 if (err)
1661 goto out_free;
1662
1663 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1664
1665restart:
1666 if (!other) {
1667 err = -ECONNRESET;
1668 if (sunaddr == NULL)
1669 goto out_free;
1670
Denis V. Lunev097e66c2007-11-19 22:29:30 -08001671 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 hash, &err);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001673 if (other == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 goto out_free;
1675 }
1676
Alban Crequyd6ae3ba2011-01-18 06:39:15 +00001677 if (sk_filter(other, skb) < 0) {
1678 /* Toss the packet but do not return any error to the sender */
1679 err = len;
1680 goto out_free;
1681 }
1682
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001683 sk_locked = 0;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001684 unix_state_lock(other);
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001685restart_locked:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 err = -EPERM;
1687 if (!unix_may_send(sk, other))
1688 goto out_unlock;
1689
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001690 if (unlikely(sock_flag(other, SOCK_DEAD))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 /*
1692 * Check with 1003.1g - what should
1693 * datagram error
1694 */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001695 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 sock_put(other);
1697
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001698 if (!sk_locked)
1699 unix_state_lock(sk);
1700
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 if (unix_peer(sk) == other) {
Jianjun Konge27dfce2008-11-01 21:38:31 -07001703 unix_peer(sk) = NULL;
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001704 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1705
David S. Miller1c92b4e2007-05-31 13:24:26 -07001706 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
1708 unix_dgram_disconnected(sk, other);
1709 sock_put(other);
1710 err = -ECONNREFUSED;
1711 } else {
David S. Miller1c92b4e2007-05-31 13:24:26 -07001712 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 }
1714
1715 other = NULL;
1716 if (err)
1717 goto out_free;
1718 goto restart;
1719 }
1720
1721 err = -EPIPE;
1722 if (other->sk_shutdown & RCV_SHUTDOWN)
1723 goto out_unlock;
1724
1725 if (sk->sk_type != SOCK_SEQPACKET) {
1726 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1727 if (err)
1728 goto out_unlock;
1729 }
1730
Rainer Weikusat73fd5052016-02-11 19:37:27 +00001731 /* other == sk && unix_peer(other) != sk if
1732 * - unix_peer(sk) == NULL, destination address bound to sk
1733 * - unix_peer(sk) == sk by time of get but disconnected before lock
1734 */
1735 if (other != sk &&
1736 unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001737 if (timeo) {
1738 timeo = unix_wait_for_peer(other, timeo);
1739
1740 err = sock_intr_errno(timeo);
1741 if (signal_pending(current))
1742 goto out_free;
1743
1744 goto restart;
1745 }
1746
1747 if (!sk_locked) {
1748 unix_state_unlock(other);
1749 unix_state_double_lock(sk, other);
1750 }
1751
1752 if (unix_peer(sk) != other ||
1753 unix_dgram_peer_wake_me(sk, other)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 err = -EAGAIN;
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001755 sk_locked = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 goto out_unlock;
1757 }
1758
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001759 if (!sk_locked) {
1760 sk_locked = 1;
1761 goto restart_locked;
1762 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 }
1764
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001765 if (unlikely(sk_locked))
1766 unix_state_unlock(sk);
1767
Alban Crequy3f661162010-10-04 08:48:28 +00001768 if (sock_flag(other, SOCK_RCVTSTAMP))
1769 __net_timestamp(skb);
Eric Dumazet16e57262011-09-19 05:52:27 +00001770 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 skb_queue_tail(&other->sk_receive_queue, skb);
Eric Dumazet25888e32010-11-25 04:11:39 +00001772 if (max_level > unix_sk(other)->recursion_level)
1773 unix_sk(other)->recursion_level = max_level;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001774 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001775 other->sk_data_ready(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001777 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 return len;
1779
1780out_unlock:
Rainer Weikusat5c77e262015-11-20 22:07:23 +00001781 if (sk_locked)
1782 unix_state_unlock(sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001783 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784out_free:
1785 kfree_skb(skb);
1786out:
1787 if (other)
1788 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001789 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 return err;
1791}
1792
Eric Dumazete370a722013-08-08 14:37:32 -07001793/* We use paged skbs for stream sockets, and limit occupancy to 32768
1794 * bytes, and a minimun of a full page.
1795 */
1796#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001797
Ying Xue1b784142015-03-02 15:37:48 +08001798static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1799 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 struct sock *sk = sock->sk;
1802 struct sock *other = NULL;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001803 int err, size;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001804 struct sk_buff *skb;
Jianjun Konge27dfce2008-11-01 21:38:31 -07001805 int sent = 0;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001806 struct scm_cookie scm;
Miklos Szeredi8ba69ba2009-09-11 11:31:45 -07001807 bool fds_sent = false;
Eric Dumazet25888e32010-11-25 04:11:39 +00001808 int max_level;
Eric Dumazete370a722013-08-08 14:37:32 -07001809 int data_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
dann frazier5f23b732008-11-26 15:32:27 -08001811 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001812 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 if (err < 0)
1814 return err;
1815
1816 err = -EOPNOTSUPP;
1817 if (msg->msg_flags&MSG_OOB)
1818 goto out_err;
1819
1820 if (msg->msg_namelen) {
1821 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1822 goto out_err;
1823 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 err = -ENOTCONN;
Benjamin LaHaise830a1e52005-12-13 23:22:32 -08001825 other = unix_peer(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 if (!other)
1827 goto out_err;
1828 }
1829
1830 if (sk->sk_shutdown & SEND_SHUTDOWN)
1831 goto pipe_err;
1832
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001833 while (sent < len) {
Eric Dumazete370a722013-08-08 14:37:32 -07001834 size = len - sent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
1836 /* Keep two messages in the pipe so it schedules better */
Eric Dumazete370a722013-08-08 14:37:32 -07001837 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
Eric Dumazete370a722013-08-08 14:37:32 -07001839 /* allow fallback to order-0 allocations */
1840 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001841
Eric Dumazete370a722013-08-08 14:37:32 -07001842 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001843
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001844 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1845
Eric Dumazete370a722013-08-08 14:37:32 -07001846 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001847 msg->msg_flags & MSG_DONTWAIT, &err,
1848 get_order(UNIX_SKB_FRAGS_SZ));
Eric Dumazete370a722013-08-08 14:37:32 -07001849 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 goto out_err;
1851
David S. Millerf78a5fd2011-09-16 19:34:00 -04001852 /* Only send the fds in the first buffer */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001853 err = unix_scm_to_skb(&scm, skb, !fds_sent);
Eric Dumazet25888e32010-11-25 04:11:39 +00001854 if (err < 0) {
Eric W. Biederman7361c362010-06-13 03:34:33 +00001855 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001856 goto out_err;
Miklos Szeredi62093442008-11-09 15:23:57 +01001857 }
Eric Dumazet25888e32010-11-25 04:11:39 +00001858 max_level = err + 1;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001859 fds_sent = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860
Eric Dumazete370a722013-08-08 14:37:32 -07001861 skb_put(skb, size - data_len);
1862 skb->data_len = data_len;
1863 skb->len = size;
Al Viroc0371da2014-11-24 10:42:55 -05001864 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001865 if (err) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001867 goto out_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 }
1869
David S. Miller1c92b4e2007-05-31 13:24:26 -07001870 unix_state_lock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
1872 if (sock_flag(other, SOCK_DEAD) ||
1873 (other->sk_shutdown & RCV_SHUTDOWN))
1874 goto pipe_err_free;
1875
Eric Dumazet16e57262011-09-19 05:52:27 +00001876 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 skb_queue_tail(&other->sk_receive_queue, skb);
Eric Dumazet25888e32010-11-25 04:11:39 +00001878 if (max_level > unix_sk(other)->recursion_level)
1879 unix_sk(other)->recursion_level = max_level;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001880 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001881 other->sk_data_ready(other);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001882 sent += size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001885 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886
1887 return sent;
1888
1889pipe_err_free:
David S. Miller1c92b4e2007-05-31 13:24:26 -07001890 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 kfree_skb(skb);
1892pipe_err:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001893 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1894 send_sig(SIGPIPE, current, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 err = -EPIPE;
1896out_err:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001897 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 return sent ? : err;
1899}
1900
Ying Xue1b784142015-03-02 15:37:48 +08001901static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1902 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903{
1904 int err;
1905 struct sock *sk = sock->sk;
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001906
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 err = sock_error(sk);
1908 if (err)
1909 return err;
1910
1911 if (sk->sk_state != TCP_ESTABLISHED)
1912 return -ENOTCONN;
1913
1914 if (msg->msg_namelen)
1915 msg->msg_namelen = 0;
1916
Ying Xue1b784142015-03-02 15:37:48 +08001917 return unix_dgram_sendmsg(sock, msg, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918}
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001919
Ying Xue1b784142015-03-02 15:37:48 +08001920static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
1921 size_t size, int flags)
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001922{
1923 struct sock *sk = sock->sk;
1924
1925 if (sk->sk_state != TCP_ESTABLISHED)
1926 return -ENOTCONN;
1927
Ying Xue1b784142015-03-02 15:37:48 +08001928 return unix_dgram_recvmsg(sock, msg, size, flags);
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001929}
1930
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1932{
1933 struct unix_sock *u = unix_sk(sk);
1934
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 if (u->addr) {
1936 msg->msg_namelen = u->addr->len;
1937 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1938 }
1939}
1940
Ying Xue1b784142015-03-02 15:37:48 +08001941static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1942 size_t size, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001944 struct scm_cookie scm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 struct sock *sk = sock->sk;
1946 struct unix_sock *u = unix_sk(sk);
1947 int noblock = flags & MSG_DONTWAIT;
1948 struct sk_buff *skb;
1949 int err;
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001950 int peeked, skip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951
1952 err = -EOPNOTSUPP;
1953 if (flags&MSG_OOB)
1954 goto out;
1955
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00001956 err = mutex_lock_interruptible(&u->readlock);
Eric Dumazetde144392014-03-25 18:42:27 -07001957 if (unlikely(err)) {
1958 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
1959 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1960 */
1961 err = noblock ? -EAGAIN : -ERESTARTSYS;
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00001962 goto out;
1963 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001965 skip = sk_peek_offset(sk, flags);
1966
1967 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
Florian Zumbiehl0a112252007-11-29 23:19:23 +11001968 if (!skb) {
1969 unix_state_lock(sk);
1970 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1971 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1972 (sk->sk_shutdown & RCV_SHUTDOWN))
1973 err = 0;
1974 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 goto out_unlock;
Florian Zumbiehl0a112252007-11-29 23:19:23 +11001976 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
Eric Dumazet67426b72010-10-29 20:44:44 +00001978 wake_up_interruptible_sync_poll(&u->peer_wait,
1979 POLLOUT | POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
1981 if (msg->msg_name)
1982 unix_copy_addr(msg, skb->sk);
1983
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001984 if (size > skb->len - skip)
1985 size = skb->len - skip;
1986 else if (size < skb->len - skip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 msg->msg_flags |= MSG_TRUNC;
1988
David S. Miller51f3d022014-11-05 16:46:40 -05001989 err = skb_copy_datagram_msg(skb, skip, msg, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 if (err)
1991 goto out_free;
1992
Alban Crequy3f661162010-10-04 08:48:28 +00001993 if (sock_flag(sk, SOCK_RCVTSTAMP))
1994 __sock_recv_timestamp(msg, sk, skb);
1995
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001996 memset(&scm, 0, sizeof(scm));
1997
1998 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
1999 unix_set_secdata(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002001 if (!(flags & MSG_PEEK)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002003 unix_detach_fds(&scm, skb);
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00002004
2005 sk_peek_offset_bwd(sk, skb->len);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002006 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 /* It is questionable: on PEEK we could:
2008 - do not return fds - good, but too simple 8)
2009 - return fds, and do not return them on read (old strategy,
2010 apparently wrong)
2011 - clone fds (I chose it for now, it is the most universal
2012 solution)
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002013
2014 POSIX 1003.1g does not actually define this clearly
2015 at all. POSIX 1003.1g doesn't define a lot of things
2016 clearly however!
2017
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 */
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00002019
2020 sk_peek_offset_fwd(sk, size);
2021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002023 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 }
Eric Dumazet9f6f9af2012-02-21 23:24:55 +00002025 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002027 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
2029out_free:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002030 skb_free_datagram(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031out_unlock:
Ingo Molnar57b47a52006-03-20 22:35:41 -08002032 mutex_unlock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033out:
2034 return err;
2035}
2036
2037/*
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002038 * Sleep until more data has arrived. But check for races..
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 */
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002040static long unix_stream_data_wait(struct sock *sk, long timeo,
2041 struct sk_buff *last)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042{
2043 DEFINE_WAIT(wait);
2044
David S. Miller1c92b4e2007-05-31 13:24:26 -07002045 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
2047 for (;;) {
Eric Dumazetaa395142010-04-20 13:03:51 +00002048 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002050 if (skb_peek_tail(&sk->sk_receive_queue) != last ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 sk->sk_err ||
2052 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2053 signal_pending(current) ||
2054 !timeo)
2055 break;
2056
2057 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002058 unix_state_unlock(sk);
Colin Cross2b15af62013-05-06 23:50:21 +00002059 timeo = freezable_schedule_timeout(timeo);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002060 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07002061
2062 if (sock_flag(sk, SOCK_DEAD))
2063 break;
2064
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2066 }
2067
Eric Dumazetaa395142010-04-20 13:03:51 +00002068 finish_wait(sk_sleep(sk), &wait);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002069 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 return timeo;
2071}
2072
Eric Dumazete370a722013-08-08 14:37:32 -07002073static unsigned int unix_skb_len(const struct sk_buff *skb)
2074{
2075 return skb->len - UNIXCB(skb).consumed;
2076}
2077
Ying Xue1b784142015-03-02 15:37:48 +08002078static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2079 size_t size, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002081 struct scm_cookie scm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 struct sock *sk = sock->sk;
2083 struct unix_sock *u = unix_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002084 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 int copied = 0;
Eric Dumazetde144392014-03-25 18:42:27 -07002086 int noblock = flags & MSG_DONTWAIT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 int check_creds = 0;
2088 int target;
2089 int err = 0;
2090 long timeo;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002091 int skip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
2093 err = -EINVAL;
2094 if (sk->sk_state != TCP_ESTABLISHED)
2095 goto out;
2096
2097 err = -EOPNOTSUPP;
2098 if (flags&MSG_OOB)
2099 goto out;
2100
2101 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
Eric Dumazetde144392014-03-25 18:42:27 -07002102 timeo = sock_rcvtimeo(sk, noblock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 /* Lock the socket to prevent queue disordering
2105 * while sleeps in memcpy_tomsg
2106 */
2107
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002108 memset(&scm, 0, sizeof(scm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109
Rainer Weikusatcc01a0a2015-12-16 20:09:25 +00002110 mutex_lock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
Andrey Vagine09e8892015-10-02 00:05:36 +03002112 if (flags & MSG_PEEK)
2113 skip = sk_peek_offset(sk, flags);
2114 else
2115 skip = 0;
2116
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002117 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 int chunk;
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002119 struct sk_buff *skb, *last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002121 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07002122 if (sock_flag(sk, SOCK_DEAD)) {
2123 err = -ECONNRESET;
2124 goto unlock;
2125 }
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002126 last = skb = skb_peek(&sk->sk_receive_queue);
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002127again:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002128 if (skb == NULL) {
Eric Dumazet25888e32010-11-25 04:11:39 +00002129 unix_sk(sk)->recursion_level = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 if (copied >= target)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002131 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
2133 /*
2134 * POSIX 1003.1g mandates this order.
2135 */
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002136
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002137 err = sock_error(sk);
2138 if (err)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002139 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 if (sk->sk_shutdown & RCV_SHUTDOWN)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002141 goto unlock;
2142
2143 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 err = -EAGAIN;
2145 if (!timeo)
2146 break;
Ingo Molnar57b47a52006-03-20 22:35:41 -08002147 mutex_unlock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002149 timeo = unix_stream_data_wait(sk, timeo, last);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150
Rainer Weikusatcc01a0a2015-12-16 20:09:25 +00002151 if (signal_pending(current)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 err = sock_intr_errno(timeo);
Eric Dumazet8d988532016-01-24 13:53:50 -08002153 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 goto out;
2155 }
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00002156
Rainer Weikusatcc01a0a2015-12-16 20:09:25 +00002157 mutex_lock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 continue;
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002159 unlock:
2160 unix_state_unlock(sk);
2161 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 }
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002163
Eric Dumazete370a722013-08-08 14:37:32 -07002164 while (skip >= unix_skb_len(skb)) {
2165 skip -= unix_skb_len(skb);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002166 last = skb;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002167 skb = skb_peek_next(skb, &sk->sk_receive_queue);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002168 if (!skb)
2169 goto again;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002170 }
2171
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002172 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
2174 if (check_creds) {
2175 /* Never glue messages from different writers */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002176 if ((UNIXCB(skb).pid != scm.pid) ||
2177 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
2178 !gid_eq(UNIXCB(skb).gid, scm.creds.gid))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 break;
Eric W. Biederman0e82e7f6d2013-04-03 16:14:47 +00002180 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 /* Copy credentials */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002182 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 check_creds = 1;
2184 }
2185
2186 /* Copy address just once */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002187 if (sunaddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 unix_copy_addr(msg, skb->sk);
2189 sunaddr = NULL;
2190 }
2191
Eric Dumazete370a722013-08-08 14:37:32 -07002192 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
David S. Miller51f3d022014-11-05 16:46:40 -05002193 if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2194 msg, chunk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 if (copied == 0)
2196 copied = -EFAULT;
2197 break;
2198 }
2199 copied += chunk;
2200 size -= chunk;
2201
2202 /* Mark read part of skb as used */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002203 if (!(flags & MSG_PEEK)) {
Eric Dumazete370a722013-08-08 14:37:32 -07002204 UNIXCB(skb).consumed += chunk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002206 sk_peek_offset_bwd(sk, chunk);
2207
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002209 unix_detach_fds(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
Eric Dumazete370a722013-08-08 14:37:32 -07002211 if (unix_skb_len(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213
Eric Dumazet6f01fd62012-01-28 16:11:03 +00002214 skb_unlink(skb, &sk->sk_receive_queue);
Neil Horman70d4bf62010-07-20 06:45:56 +00002215 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002217 if (scm.fp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002219 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 /* It is questionable, see note in unix_dgram_recvmsg.
2221 */
2222 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002223 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Andrey Vagine09e8892015-10-02 00:05:36 +03002225 sk_peek_offset_fwd(sk, chunk);
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002226
Aaron Conole9bf31c52015-09-26 18:50:43 -04002227 if (UNIXCB(skb).fp)
2228 break;
2229
Andrey Vagine09e8892015-10-02 00:05:36 +03002230 skip = 0;
Aaron Conole9bf31c52015-09-26 18:50:43 -04002231 last = skb;
2232 unix_state_lock(sk);
2233 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2234 if (skb)
2235 goto again;
2236 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 break;
2238 }
2239 } while (size);
2240
Ingo Molnar57b47a52006-03-20 22:35:41 -08002241 mutex_unlock(&u->readlock);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002242 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243out:
2244 return copied ? : err;
2245}
2246
2247static int unix_shutdown(struct socket *sock, int mode)
2248{
2249 struct sock *sk = sock->sk;
2250 struct sock *other;
2251
Xi Wangfc61b922012-08-26 16:47:13 +00002252 if (mode < SHUT_RD || mode > SHUT_RDWR)
2253 return -EINVAL;
2254 /* This maps:
2255 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2256 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2257 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2258 */
2259 ++mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Alban Crequy7180a032011-01-19 04:56:36 +00002261 unix_state_lock(sk);
2262 sk->sk_shutdown |= mode;
2263 other = unix_peer(sk);
2264 if (other)
2265 sock_hold(other);
2266 unix_state_unlock(sk);
2267 sk->sk_state_change(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268
Alban Crequy7180a032011-01-19 04:56:36 +00002269 if (other &&
2270 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
Alban Crequy7180a032011-01-19 04:56:36 +00002272 int peer_mode = 0;
2273
2274 if (mode&RCV_SHUTDOWN)
2275 peer_mode |= SEND_SHUTDOWN;
2276 if (mode&SEND_SHUTDOWN)
2277 peer_mode |= RCV_SHUTDOWN;
2278 unix_state_lock(other);
2279 other->sk_shutdown |= peer_mode;
2280 unix_state_unlock(other);
2281 other->sk_state_change(other);
2282 if (peer_mode == SHUTDOWN_MASK)
2283 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2284 else if (peer_mode & RCV_SHUTDOWN)
2285 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 }
Alban Crequy7180a032011-01-19 04:56:36 +00002287 if (other)
2288 sock_put(other);
2289
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 return 0;
2291}
2292
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002293long unix_inq_len(struct sock *sk)
2294{
2295 struct sk_buff *skb;
2296 long amount = 0;
2297
2298 if (sk->sk_state == TCP_LISTEN)
2299 return -EINVAL;
2300
2301 spin_lock(&sk->sk_receive_queue.lock);
2302 if (sk->sk_type == SOCK_STREAM ||
2303 sk->sk_type == SOCK_SEQPACKET) {
2304 skb_queue_walk(&sk->sk_receive_queue, skb)
Eric Dumazete370a722013-08-08 14:37:32 -07002305 amount += unix_skb_len(skb);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002306 } else {
2307 skb = skb_peek(&sk->sk_receive_queue);
2308 if (skb)
2309 amount = skb->len;
2310 }
2311 spin_unlock(&sk->sk_receive_queue.lock);
2312
2313 return amount;
2314}
2315EXPORT_SYMBOL_GPL(unix_inq_len);
2316
/*
 * unix_outq_len - bytes queued for transmit on @sk (SIOCOUTQ).
 * Simply the socket's current write-memory charge.
 */
long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
2322
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2324{
2325 struct sock *sk = sock->sk;
Jianjun Konge27dfce2008-11-01 21:38:31 -07002326 long amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 int err;
2328
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002329 switch (cmd) {
2330 case SIOCOUTQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002331 amount = unix_outq_len(sk);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002332 err = put_user(amount, (int __user *)arg);
2333 break;
2334 case SIOCINQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002335 amount = unix_inq_len(sk);
2336 if (amount < 0)
2337 err = amount;
2338 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 err = put_user(amount, (int __user *)arg);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002340 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002341 default:
2342 err = -ENOIOCTLCMD;
2343 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 }
2345 return err;
2346}
2347
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002348static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349{
2350 struct sock *sk = sock->sk;
2351 unsigned int mask;
2352
Eric Dumazetaa395142010-04-20 13:03:51 +00002353 sock_poll_wait(file, sk_sleep(sk), wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 mask = 0;
2355
2356 /* exceptional events? */
2357 if (sk->sk_err)
2358 mask |= POLLERR;
2359 if (sk->sk_shutdown == SHUTDOWN_MASK)
2360 mask |= POLLHUP;
Davide Libenzif348d702006-03-25 03:07:39 -08002361 if (sk->sk_shutdown & RCV_SHUTDOWN)
Eric Dumazetdb409802010-09-06 11:13:50 +00002362 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363
2364 /* readable? */
Eric Dumazetdb409802010-09-06 11:13:50 +00002365 if (!skb_queue_empty(&sk->sk_receive_queue))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 mask |= POLLIN | POLLRDNORM;
2367
2368 /* Connection-based need to check for termination and startup */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002369 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2370 sk->sk_state == TCP_CLOSE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 mask |= POLLHUP;
2372
2373 /*
2374 * we set writable also when the other side has shut down the
2375 * connection. This prevents stuck sockets.
2376 */
2377 if (unix_writable(sk))
2378 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2379
2380 return mask;
2381}
2382
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002383static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2384 poll_table *wait)
Rainer Weikusat3c734192008-06-17 22:28:05 -07002385{
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002386 struct sock *sk = sock->sk, *other;
2387 unsigned int mask, writable;
Rainer Weikusat3c734192008-06-17 22:28:05 -07002388
Eric Dumazetaa395142010-04-20 13:03:51 +00002389 sock_poll_wait(file, sk_sleep(sk), wait);
Rainer Weikusat3c734192008-06-17 22:28:05 -07002390 mask = 0;
2391
2392 /* exceptional events? */
2393 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00002394 mask |= POLLERR |
Jacob Keller8facd5f2013-04-02 13:55:40 -07002395 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00002396
Rainer Weikusat3c734192008-06-17 22:28:05 -07002397 if (sk->sk_shutdown & RCV_SHUTDOWN)
Eric Dumazet5456f092010-10-31 05:36:23 +00002398 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
Rainer Weikusat3c734192008-06-17 22:28:05 -07002399 if (sk->sk_shutdown == SHUTDOWN_MASK)
2400 mask |= POLLHUP;
2401
2402 /* readable? */
Eric Dumazet5456f092010-10-31 05:36:23 +00002403 if (!skb_queue_empty(&sk->sk_receive_queue))
Rainer Weikusat3c734192008-06-17 22:28:05 -07002404 mask |= POLLIN | POLLRDNORM;
2405
2406 /* Connection-based need to check for termination and startup */
2407 if (sk->sk_type == SOCK_SEQPACKET) {
2408 if (sk->sk_state == TCP_CLOSE)
2409 mask |= POLLHUP;
2410 /* connection hasn't started yet? */
2411 if (sk->sk_state == TCP_SYN_SENT)
2412 return mask;
2413 }
2414
Eric Dumazet973a34a2010-10-31 05:38:25 +00002415 /* No write status requested, avoid expensive OUT tests. */
Hans Verkuil626cf232012-03-23 15:02:27 -07002416 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
Eric Dumazet973a34a2010-10-31 05:38:25 +00002417 return mask;
2418
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002419 writable = unix_writable(sk);
Rainer Weikusat5c77e262015-11-20 22:07:23 +00002420 if (writable) {
2421 unix_state_lock(sk);
2422
2423 other = unix_peer(sk);
2424 if (other && unix_peer(other) != sk &&
2425 unix_recvq_full(other) &&
2426 unix_dgram_peer_wake_me(sk, other))
2427 writable = 0;
2428
2429 unix_state_unlock(sk);
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002430 }
2431
2432 if (writable)
Rainer Weikusat3c734192008-06-17 22:28:05 -07002433 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2434 else
2435 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2436
Rainer Weikusat3c734192008-06-17 22:28:05 -07002437 return mask;
2438}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439
2440#ifdef CONFIG_PROC_FS
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002441
/* /proc/net/unix iterator position encoding: the upper bits of the
 * seq_file *pos select a hash bucket, the low BUCKET_SPACE bits hold a
 * 1-based offset within that bucket (0 is reserved for the header row).
 */
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002442#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2443
2444#define get_bucket(x) ((x) >> BUCKET_SPACE)
2445#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2446#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002447
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002448static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449{
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002450 unsigned long offset = get_offset(*pos);
2451 unsigned long bucket = get_bucket(*pos);
2452 struct sock *sk;
2453 unsigned long count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002455 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2456 if (sock_net(sk) != seq_file_net(seq))
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002457 continue;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002458 if (++count == offset)
2459 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 }
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002461
2462 return sk;
2463}
2464
2465static struct sock *unix_next_socket(struct seq_file *seq,
2466 struct sock *sk,
2467 loff_t *pos)
2468{
2469 unsigned long bucket;
2470
2471 while (sk > (struct sock *)SEQ_START_TOKEN) {
2472 sk = sk_next(sk);
2473 if (!sk)
2474 goto next_bucket;
2475 if (sock_net(sk) == seq_file_net(seq))
2476 return sk;
2477 }
2478
2479 do {
2480 sk = unix_from_bucket(seq, pos);
2481 if (sk)
2482 return sk;
2483
2484next_bucket:
2485 bucket = get_bucket(*pos) + 1;
2486 *pos = set_bucket_offset(bucket, 1);
2487 } while (bucket < ARRAY_SIZE(unix_socket_table));
2488
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 return NULL;
2490}
2491
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002493 __acquires(unix_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494{
David S. Millerfbe9cc42005-12-13 23:26:29 -08002495 spin_lock(&unix_table_lock);
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002496
2497 if (!*pos)
2498 return SEQ_START_TOKEN;
2499
2500 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2501 return NULL;
2502
2503 return unix_next_socket(seq, NULL, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504}
2505
2506static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2507{
2508 ++*pos;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002509 return unix_next_socket(seq, v, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510}
2511
2512static void unix_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002513 __releases(unix_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514{
David S. Millerfbe9cc42005-12-13 23:26:29 -08002515 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516}
2517
/* Emit one /proc/net/unix row for socket @v, or the column header when
 * handed the start token. Note the refcount/flags/state fields follow
 * the historical format; "Protocol" is always printed as 0.
 */
2518static int unix_seq_show(struct seq_file *seq, void *v)
2519{
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002520
Joe Perchesb9f31242008-04-12 19:04:38 -07002521	if (v == SEQ_START_TOKEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522		seq_puts(seq, "Num RefCount Protocol Flags Type St "
2523 "Inode Path\n");
2524 else {
2525 struct sock *s = v;
2526 struct unix_sock *u = unix_sk(s);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002527 unix_state_lock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528
Dan Rosenberg71338aa2011-05-23 12:17:35 +00002529 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 s,
2531 atomic_read(&s->sk_refcnt),
2532 0,
2533 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2534 s->sk_type,
2535 s->sk_socket ?
2536 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2537 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2538 sock_i_ino(s));
2539
/* Append the bound name, if any: abstract addresses print a leading
 * '@' and skip sun_path[0]; filesystem addresses drop one trailing
 * byte (presumably the NUL terminator — confirm against unix_bind).
 */
2540 if (u->addr) {
2541 int i, len;
2542 seq_putc(seq, ' ');
2543
2544 i = 0;
2545 len = u->addr->len - sizeof(short);
2546 if (!UNIX_ABSTRACT(s))
2547 len--;
2548 else {
2549 seq_putc(seq, '@');
2550 i++;
2551 }
2552 for ( ; i < len; i++)
2553 seq_putc(seq, u->addr->name->sun_path[i]);
2554 }
David S. Miller1c92b4e2007-05-31 13:24:26 -07002555 unix_state_unlock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 seq_putc(seq, '\n');
2557 }
2558
2559 return 0;
2560}
2561
Philippe De Muyter56b3d972007-07-10 23:07:31 -07002562static const struct seq_operations unix_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 .start = unix_seq_start,
2564 .next = unix_seq_next,
2565 .stop = unix_seq_stop,
2566 .show = unix_seq_show,
2567};
2568
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569static int unix_seq_open(struct inode *inode, struct file *file)
2570{
Denis V. Luneve372c412007-11-19 22:31:54 -08002571 return seq_open_net(inode, file, &unix_seq_ops,
Eric Dumazet8b51b062012-06-08 22:10:20 +00002572 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573}
2574
Arjan van de Venda7071d2007-02-12 00:55:36 -08002575static const struct file_operations unix_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 .owner = THIS_MODULE,
2577 .open = unix_seq_open,
2578 .read = seq_read,
2579 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002580 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581};
2582
2583#endif
2584
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00002585static const struct net_proto_family unix_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 .family = PF_UNIX,
2587 .create = unix_create,
2588 .owner = THIS_MODULE,
2589};
2590
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002591
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002592static int __net_init unix_net_init(struct net *net)
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002593{
2594 int error = -ENOMEM;
2595
Denis V. Luneva0a53c82007-12-11 04:19:17 -08002596 net->unx.sysctl_max_dgram_qlen = 10;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002597 if (unix_sysctl_register(net))
2598 goto out;
Pavel Emelyanovd392e492007-12-01 23:44:15 +11002599
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002600#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00002601 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002602 unix_sysctl_unregister(net);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002603 goto out;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002604 }
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002605#endif
2606 error = 0;
2607out:
Jianjun Kong48dcc33e2008-11-01 21:37:27 -07002608 return error;
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002609}
2610
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002611static void __net_exit unix_net_exit(struct net *net)
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002612{
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002613 unix_sysctl_unregister(net);
Gao fengece31ff2013-02-18 01:34:56 +00002614 remove_proc_entry("unix", net->proc_net);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002615}
2616
2617static struct pernet_operations unix_net_ops = {
2618 .init = unix_net_init,
2619 .exit = unix_net_exit,
2620};
2621
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622static int __init af_unix_init(void)
2623{
2624 int rc = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625
YOSHIFUJI Hideaki / 吉藤英明b4fff5f2013-01-09 07:20:07 +00002626 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627
2628 rc = proto_register(&unix_proto, 1);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002629 if (rc != 0) {
wangweidong5cc208b2013-12-06 18:03:36 +08002630 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 goto out;
2632 }
2633
2634 sock_register(&unix_family_ops);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002635 register_pernet_subsys(&unix_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636out:
2637 return rc;
2638}
2639
2640static void __exit af_unix_exit(void)
2641{
2642 sock_unregister(PF_UNIX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 proto_unregister(&unix_proto);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002644 unregister_pernet_subsys(&unix_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645}
2646
David Woodhouse3d366962008-04-24 00:59:25 -07002647/* Earlier than device_initcall() so that other drivers invoking
2648 request_module() don't end up in a loop when modprobe tries
2649 to use a UNIX socket. But later than subsys_initcall() because
2650 we depend on stuff initialised there */
2651fs_initcall(af_unix_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652module_exit(af_unix_exit);
2653
/* Module metadata: GPL licence; the NETPROTO alias lets the kernel
 * auto-load this module when a PF_UNIX socket is first requested.
 */
2654MODULE_LICENSE("GPL");
2655MODULE_ALIAS_NETPROTO(PF_UNIX);