/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we dont leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;
	case SO_BINDTODEVICE:
		v.val = sk->sk_bound_dev_if;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarly, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
 * un-modified. Special care is taken when initializing object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
Daniel Wagner8fb974c2012-09-12 16:12:02 +02001219#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
Daniel Wagnerfd9a08a2012-10-25 04:16:58 +00001220void sock_update_classid(struct sock *sk, struct task_struct *task)
Herbert Xuf8451722010-05-24 00:12:34 -07001221{
Paul E. McKenney11441822010-10-06 17:15:35 -07001222 u32 classid;
Herbert Xuf8451722010-05-24 00:12:34 -07001223
Daniel Wagnerfd9a08a2012-10-25 04:16:58 +00001224 classid = task_cls_classid(task);
Neil Horman3afa6d02012-08-20 07:59:10 +00001225 if (classid != sk->sk_classid)
Herbert Xuf8451722010-05-24 00:12:34 -07001226 sk->sk_classid = classid;
1227}
Herbert Xu82862742010-05-24 00:14:10 -07001228EXPORT_SYMBOL(sock_update_classid);
Daniel Wagner8fb974c2012-09-12 16:12:02 +02001229#endif
Neil Horman5bc14212011-11-22 05:10:51 +00001230
Daniel Wagner51e4e7f2012-09-12 16:12:03 +02001231#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
John Fastabend406a3c62012-07-20 10:39:25 +00001232void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
Neil Horman5bc14212011-11-22 05:10:51 +00001233{
Neil Horman5bc14212011-11-22 05:10:51 +00001234 if (in_interrupt())
1235 return;
Neil Horman2b73bc62012-02-10 05:43:38 +00001236
John Fastabend406a3c62012-07-20 10:39:25 +00001237 sk->sk_cgrp_prioidx = task_netprioidx(task);
Neil Horman5bc14212011-11-22 05:10:51 +00001238}
1239EXPORT_SYMBOL_GPL(sock_update_netprioidx);
Herbert Xuf8451722010-05-24 00:12:34 -07001240#endif
Daniel Wagner51e4e7f2012-09-12 16:12:03 +02001241#endif
Herbert Xuf8451722010-05-24 00:12:34 -07001242
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243/**
1244 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001245 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001246 * @family: protocol family
1247 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1248 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001250struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001251 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001253 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001255 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001257 sk->sk_family = family;
1258 /*
1259 * See comment in struct sock definition to understand
1260 * why we need sk_prot_creator -acme
1261 */
1262 sk->sk_prot = sk->sk_prot_creator = prot;
1263 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001264 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001265 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001266
Daniel Wagnerfd9a08a2012-10-25 04:16:58 +00001267 sock_update_classid(sk, current);
John Fastabend406a3c62012-07-20 10:39:25 +00001268 sock_update_netprioidx(sk, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 }
Frank Filza79af592005-09-27 15:23:38 -07001270
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001271 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272}
Eric Dumazet2a915252009-05-27 11:30:05 +00001273EXPORT_SYMBOL(sk_alloc);
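/*
 * Illustrative sketch, not part of this file: how a protocol family's create
 * handler typically pairs sk_alloc() with sock_init_data() below. The names
 * example_create and example_proto are hypothetical.
 */
#if 0
static int example_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &example_proto);
	if (!sk)
		return -ENOBUFS;

	sock_init_data(sock, sk);	/* queues, timers, default callbacks */
	sk->sk_protocol = protocol;
	return 0;
}
#endif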
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
Eric Dumazet2b85a342009-06-11 02:55:43 -07001275static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276{
1277 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278
1279 if (sk->sk_destruct)
1280 sk->sk_destruct(sk);
1281
Paul E. McKenneya898def2010-02-22 17:04:49 -08001282 filter = rcu_dereference_check(sk->sk_filter,
1283 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001285 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001286 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 }
1288
Eric Dumazet08e29af2011-11-28 12:04:18 +00001289 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
1291 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001292 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1293 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001295 if (sk->sk_peer_cred)
1296 put_cred(sk->sk_peer_cred);
1297 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001298 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001299 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001301
1302void sk_free(struct sock *sk)
1303{
1304 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001305	 * We subtract one from sk_wmem_alloc so we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001306	 * some packets are still in some tx queue.
1307	 * If not zero, sock_wfree() will call __sk_free(sk) later
1308 */
1309 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1310 __sk_free(sk);
1311}
Eric Dumazet2a915252009-05-27 11:30:05 +00001312EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Denis V. Lunevedf02082008-02-29 11:18:32 -08001314/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001315 * The last sock_put should drop the reference to sk->sk_net. It has already
1316 * been dropped in sk_change_net. Taking a reference to the stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001317 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001318 * Take a reference to the socket to remove it from the hash while still _alive_,
Denis V. Lunevedf02082008-02-29 11:18:32 -08001319 * and after that destroy it in the context of init_net.
1320 */
1321void sk_release_kernel(struct sock *sk)
1322{
1323 if (sk == NULL || sk->sk_socket == NULL)
1324 return;
1325
1326 sock_hold(sk);
1327 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001328 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001329 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001330 sock_put(sk);
1331}
David S. Miller45af1752008-02-29 11:33:19 -08001332EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001333
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001334static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1335{
1336 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1337 sock_update_memcg(newsk);
1338}
1339
Eric Dumazete56c57d2011-11-08 17:07:07 -05001340/**
1341 * sk_clone_lock - clone a socket, and lock its clone
1342 * @sk: the socket to clone
1343 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1344 *
1345 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1346 */
1347struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001348{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001349 struct sock *newsk;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001350
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001351 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001352 if (newsk != NULL) {
1353 struct sk_filter *filter;
1354
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001355 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001356
1357 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001358 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001359 sk_node_init(&newsk->sk_node);
1360 sock_lock_init(newsk);
1361 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001362 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001363 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001364
1365 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001366 /*
1367 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1368 */
1369 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001370 atomic_set(&newsk->sk_omem_alloc, 0);
1371 skb_queue_head_init(&newsk->sk_receive_queue);
1372 skb_queue_head_init(&newsk->sk_write_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001373#ifdef CONFIG_NET_DMA
1374 skb_queue_head_init(&newsk->sk_async_wait_queue);
1375#endif
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001376
Eric Dumazetb6c67122010-04-08 23:03:29 +00001377 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001378 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001379 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1380 af_callback_keys + newsk->sk_family,
1381 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001382
1383 newsk->sk_dst_cache = NULL;
1384 newsk->sk_wmem_queued = 0;
1385 newsk->sk_forward_alloc = 0;
1386 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001387 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1388
1389 sock_reset_flag(newsk, SOCK_DONE);
1390 skb_queue_head_init(&newsk->sk_error_queue);
1391
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001392 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001393 if (filter != NULL)
1394 sk_filter_charge(newsk, filter);
1395
1396 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1397 /* It is still raw copy of parent, so invalidate
1398 * destructor and make plain sk_free() */
1399 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001400 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001401 sk_free(newsk);
1402 newsk = NULL;
1403 goto out;
1404 }
1405
1406 newsk->sk_err = 0;
1407 newsk->sk_priority = 0;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001408 /*
1409 * Before updating sk_refcnt, we must commit prior changes to memory
1410 * (Documentation/RCU/rculist_nulls.txt for details)
1411 */
1412 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001413 atomic_set(&newsk->sk_refcnt, 2);
1414
1415 /*
1416 * Increment the counter in the same struct proto as the master
1417 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1418 * is the same as sk->sk_prot->socks, as this field was copied
1419 * with memcpy).
1420 *
1421 * This _changes_ the previous behaviour, where
1422	 * tcp_create_openreq_child always incremented the
1423	 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1424	 * to be taken into account in all callers. -acme
1425 */
1426 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001427 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001428 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001429
Glauber Costaf3f511e2012-01-05 20:16:39 +00001430 sk_update_clone(sk, newsk);
1431
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001432 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001433 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001434
Eric Dumazet08e29af2011-11-28 12:04:18 +00001435 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001436 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001437 }
1438out:
1439 return newsk;
1440}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001441EXPORT_SYMBOL_GPL(sk_clone_lock);
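/*
 * Illustrative sketch, not part of this file: a hypothetical caller of
 * sk_clone_lock(), e.g. building a child sock for a passive open in softirq
 * context. example_clone is a made-up name; the point is that the caller
 * must always bh_unlock_sock() the clone it receives.
 */
#if 0
static struct sock *example_clone(const struct sock *parent)
{
	struct sock *newsk = sk_clone_lock(parent, GFP_ATOMIC);

	if (newsk) {
		/* ... protocol-private fixups on the locked clone ... */
		bh_unlock_sock(newsk);	/* caller must always unlock */
	}
	return newsk;
}
#endif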
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001442
Andi Kleen99580892007-04-20 17:12:43 -07001443void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1444{
1445 __sk_dst_set(sk, dst);
1446 sk->sk_route_caps = dst->dev->features;
1447 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001448 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001449 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001450 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001451 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001452 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001453 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001454 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001455 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001456 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001457 }
Andi Kleen99580892007-04-20 17:12:43 -07001458 }
1459}
1460EXPORT_SYMBOL_GPL(sk_setup_caps);
1461
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462/*
1463 * Simple resource managers for sockets.
1464 */
1465
1466
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001467/*
1468 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 */
1470void sock_wfree(struct sk_buff *skb)
1471{
1472 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001473 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
Eric Dumazetd99927f2009-09-24 10:49:24 +00001475 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1476 /*
1477	 * Keep a reference on sk_wmem_alloc; it will be released
1478	 * after the sk_write_space() call
1479 */
1480 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001482 len = 1;
1483 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001484 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001485 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1486 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001487 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001488 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001489 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490}
Eric Dumazet2a915252009-05-27 11:30:05 +00001491EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001493/*
1494 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 */
1496void sock_rfree(struct sk_buff *skb)
1497{
1498 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001499 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
Eric Dumazetd361fd52010-07-10 22:45:17 +00001501 atomic_sub(len, &sk->sk_rmem_alloc);
1502 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503}
Eric Dumazet2a915252009-05-27 11:30:05 +00001504EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
David S. Miller41063e92012-06-19 21:22:05 -07001506void sock_edemux(struct sk_buff *skb)
1507{
Eric Dumazete8123472012-09-02 23:57:18 +00001508 struct sock *sk = skb->sk;
1509
Randy Dunlap1c463e52012-09-10 09:13:07 -07001510#ifdef CONFIG_INET
Eric Dumazete8123472012-09-02 23:57:18 +00001511 if (sk->sk_state == TCP_TIME_WAIT)
1512 inet_twsk_put(inet_twsk(sk));
1513 else
Randy Dunlap1c463e52012-09-10 09:13:07 -07001514#endif
Eric Dumazete8123472012-09-02 23:57:18 +00001515 sock_put(sk);
David S. Miller41063e92012-06-19 21:22:05 -07001516}
1517EXPORT_SYMBOL(sock_edemux);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
Eric W. Biederman976d02012012-05-23 17:16:53 -06001519kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001521 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Eric Dumazetf064af12010-09-22 12:43:39 +00001523 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001524 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001525 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 return uid;
1527}
Eric Dumazet2a915252009-05-27 11:30:05 +00001528EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529
1530unsigned long sock_i_ino(struct sock *sk)
1531{
1532 unsigned long ino;
1533
Eric Dumazetf064af12010-09-22 12:43:39 +00001534 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001536 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 return ino;
1538}
Eric Dumazet2a915252009-05-27 11:30:05 +00001539EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541/*
1542 * Allocate a skb from the socket's send buffer.
1543 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001544struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001545 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546{
1547 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001548 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 if (skb) {
1550 skb_set_owner_w(skb, sk);
1551 return skb;
1552 }
1553 }
1554 return NULL;
1555}
Eric Dumazet2a915252009-05-27 11:30:05 +00001556EXPORT_SYMBOL(sock_wmalloc);
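/*
 * Illustrative sketch, not part of this file: a hypothetical user of
 * sock_wmalloc(). A non-forced allocation fails once sk_wmem_alloc has
 * reached sk_sndbuf; a successful one is charged to the socket and is
 * uncharged by sock_wfree() when the skb is freed.
 */
#if 0
static struct sk_buff *example_build_reply(struct sock *sk, unsigned long len)
{
	struct sk_buff *skb = sock_wmalloc(sk, len, 0, GFP_ATOMIC);

	if (!skb)
		return NULL;		/* send buffer already full */
	/* skb is now charged to sk; kfree_skb() ends up in sock_wfree() */
	return skb;
}
#endif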
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
1558/*
1559 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001560 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001561struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001562 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563{
1564 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1565 struct sk_buff *skb = alloc_skb(size, priority);
1566 if (skb) {
1567 skb_set_owner_r(skb, sk);
1568 return skb;
1569 }
1570 }
1571 return NULL;
1572}
1573
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001574/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001576 */
Al Virodd0fc662005-10-07 07:46:04 +01001577void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578{
Eric Dumazet95c96172012-04-15 05:58:06 +00001579 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1581 void *mem;
1582 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001583 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 */
1585 atomic_add(size, &sk->sk_omem_alloc);
1586 mem = kmalloc(size, priority);
1587 if (mem)
1588 return mem;
1589 atomic_sub(size, &sk->sk_omem_alloc);
1590 }
1591 return NULL;
1592}
Eric Dumazet2a915252009-05-27 11:30:05 +00001593EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
1595/*
1596 * Free an option memory block.
1597 */
1598void sock_kfree_s(struct sock *sk, void *mem, int size)
1599{
1600 kfree(mem);
1601 atomic_sub(size, &sk->sk_omem_alloc);
1602}
Eric Dumazet2a915252009-05-27 11:30:05 +00001603EXPORT_SYMBOL(sock_kfree_s);
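/*
 * Illustrative sketch, not part of this file: a hypothetical option handler
 * pairing sock_kmalloc() with sock_kfree_s(). The size passed to
 * sock_kfree_s() must match the size charged by sock_kmalloc(), since it is
 * subtracted from sk_omem_alloc.
 */
#if 0
static int example_set_opt(struct sock *sk, const void __user *optval, int len)
{
	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!buf)
		return -ENOBUFS;
	if (copy_from_user(buf, optval, len)) {
		sock_kfree_s(sk, buf, len);
		return -EFAULT;
	}
	/* ... apply the option ... */
	sock_kfree_s(sk, buf, len);
	return 0;
}
#endif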
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
1605/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1606   I think these locks should be removed for datagram sockets.
1607 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001608static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609{
1610 DEFINE_WAIT(wait);
1611
1612 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1613 for (;;) {
1614 if (!timeo)
1615 break;
1616 if (signal_pending(current))
1617 break;
1618 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001619 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1621 break;
1622 if (sk->sk_shutdown & SEND_SHUTDOWN)
1623 break;
1624 if (sk->sk_err)
1625 break;
1626 timeo = schedule_timeout(timeo);
1627 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001628 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 return timeo;
1630}
1631
1632
1633/*
1634 * Generic send/receive buffer handlers
1635 */
1636
Herbert Xu4cc7f682009-02-04 16:55:54 -08001637struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1638 unsigned long data_len, int noblock,
1639 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640{
1641 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001642 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 long timeo;
1644 int err;
Jason Wangcc9b17a2012-05-30 21:18:10 +00001645 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1646
1647 err = -EMSGSIZE;
1648 if (npages > MAX_SKB_FRAGS)
1649 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
1651 gfp_mask = sk->sk_allocation;
1652 if (gfp_mask & __GFP_WAIT)
1653 gfp_mask |= __GFP_REPEAT;
1654
1655 timeo = sock_sndtimeo(sk, noblock);
1656 while (1) {
1657 err = sock_error(sk);
1658 if (err != 0)
1659 goto failure;
1660
1661 err = -EPIPE;
1662 if (sk->sk_shutdown & SEND_SHUTDOWN)
1663 goto failure;
1664
1665 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001666 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 if (skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 int i;
1669
1670 /* No pages, we're done... */
1671 if (!data_len)
1672 break;
1673
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 skb->truesize += data_len;
1675 skb_shinfo(skb)->nr_frags = npages;
1676 for (i = 0; i < npages; i++) {
1677 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
1679 page = alloc_pages(sk->sk_allocation, 0);
1680 if (!page) {
1681 err = -ENOBUFS;
1682 skb_shinfo(skb)->nr_frags = i;
1683 kfree_skb(skb);
1684 goto failure;
1685 }
1686
Ian Campbellea2ab692011-08-22 23:44:58 +00001687 __skb_fill_page_desc(skb, i,
1688 page, 0,
1689 (data_len >= PAGE_SIZE ?
1690 PAGE_SIZE :
1691 data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 data_len -= PAGE_SIZE;
1693 }
1694
1695 /* Full success... */
1696 break;
1697 }
1698 err = -ENOBUFS;
1699 goto failure;
1700 }
1701 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1702 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1703 err = -EAGAIN;
1704 if (!timeo)
1705 goto failure;
1706 if (signal_pending(current))
1707 goto interrupted;
1708 timeo = sock_wait_for_wmem(sk, timeo);
1709 }
1710
1711 skb_set_owner_w(skb, sk);
1712 return skb;
1713
1714interrupted:
1715 err = sock_intr_errno(timeo);
1716failure:
1717 *errcode = err;
1718 return NULL;
1719}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001720EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001722struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 int noblock, int *errcode)
1724{
1725 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1726}
Eric Dumazet2a915252009-05-27 11:30:05 +00001727EXPORT_SYMBOL(sock_alloc_send_skb);
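/*
 * Illustrative sketch, not part of this file: a hypothetical datagram send
 * path using sock_alloc_send_skb() to block (or not) for send buffer space.
 * example_xmit is a made-up name; the header/payload handling is elided.
 */
#if 0
static int example_xmit(struct sock *sk, size_t len, int noblock)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len + MAX_HEADER, noblock, &err);
	if (!skb)
		return err;	/* -EAGAIN, -EPIPE, -ERESTARTSYS, ... */
	skb_reserve(skb, MAX_HEADER);
	/* ... copy payload, build headers, hand the skb to the device layer ... */
	return 0;
}
#endif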
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
Eric Dumazet5640f762012-09-23 23:04:42 +00001729/* On 32bit arches, an skb frag is limited to 2^15 */
1730#define SKB_FRAG_PAGE_ORDER get_order(32768)
1731
1732bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1733{
1734 int order;
1735
1736 if (pfrag->page) {
1737 if (atomic_read(&pfrag->page->_count) == 1) {
1738 pfrag->offset = 0;
1739 return true;
1740 }
1741 if (pfrag->offset < pfrag->size)
1742 return true;
1743 put_page(pfrag->page);
1744 }
1745
1746 /* We restrict high order allocations to users that can afford to wait */
1747 order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
1748
1749 do {
1750 gfp_t gfp = sk->sk_allocation;
1751
1752 if (order)
1753 gfp |= __GFP_COMP | __GFP_NOWARN;
1754 pfrag->page = alloc_pages(gfp, order);
1755 if (likely(pfrag->page)) {
1756 pfrag->offset = 0;
1757 pfrag->size = PAGE_SIZE << order;
1758 return true;
1759 }
1760 } while (--order >= 0);
1761
1762 sk_enter_memory_pressure(sk);
1763 sk_stream_moderate_sndbuf(sk);
1764 return false;
1765}
1766EXPORT_SYMBOL(sk_page_frag_refill);
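/*
 * Illustrative sketch, not part of this file: a hypothetical send path that
 * refills the per-socket (or per-task) page fragment before copying user
 * data into it. It assumes sk_page_frag() from include/net/sock.h is used to
 * obtain the struct page_frag; example_append is a made-up name.
 */
#if 0
static int example_append(struct sock *sk, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOBUFS;	/* allocation failed under pressure */
	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	/* ... copy into page_address(pfrag->page) + pfrag->offset ... */
	pfrag->offset += copy;
	return copy;
}
#endif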
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001769 __releases(&sk->sk_lock.slock)
1770 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771{
1772 DEFINE_WAIT(wait);
1773
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001774 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1776 TASK_UNINTERRUPTIBLE);
1777 spin_unlock_bh(&sk->sk_lock.slock);
1778 schedule();
1779 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001780 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 break;
1782 }
1783 finish_wait(&sk->sk_lock.wq, &wait);
1784}
1785
1786static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001787 __releases(&sk->sk_lock.slock)
1788 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789{
1790 struct sk_buff *skb = sk->sk_backlog.head;
1791
1792 do {
1793 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1794 bh_unlock_sock(sk);
1795
1796 do {
1797 struct sk_buff *next = skb->next;
1798
Eric Dumazete4cbb022012-04-30 16:07:09 +00001799 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001800 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001802 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
1804 /*
1805 * We are in process context here with softirqs
1806 * disabled, use cond_resched_softirq() to preempt.
1807 * This is safe to do because we've taken the backlog
1808 * queue private:
1809 */
1810 cond_resched_softirq();
1811
1812 skb = next;
1813 } while (skb != NULL);
1814
1815 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001816 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001817
1818 /*
1819	 * Doing the zeroing here guarantees we cannot loop forever
1820	 * while a wild producer attempts to flood us.
1821 */
1822 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823}
1824
1825/**
1826 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001827 * @sk: sock to wait on
1828 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 *
1830 * Now socket state including sk->sk_err is changed only under lock,
1831 * hence we may omit checks after joining wait queue.
1832 * We check receive queue before schedule() only as optimization;
1833 * it is very likely that release_sock() added new data.
1834 */
1835int sk_wait_data(struct sock *sk, long *timeo)
1836{
1837 int rc;
1838 DEFINE_WAIT(wait);
1839
Eric Dumazetaa395142010-04-20 13:03:51 +00001840 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1842 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1843 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001844 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 return rc;
1846}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847EXPORT_SYMBOL(sk_wait_data);
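/*
 * Illustrative sketch, not part of this file: a hypothetical blocking receive
 * helper built around sk_wait_data(). It assumes the socket lock is held, as
 * in real recvmsg paths; example_wait_for_skb is a made-up name.
 */
#if 0
static struct sk_buff *example_wait_for_skb(struct sock *sk, int noblock,
					    int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	while (!(skb = skb_dequeue(&sk->sk_receive_queue))) {
		*err = sock_error(sk);
		if (*err)
			return NULL;
		if (!timeo) {
			*err = -EAGAIN;
			return NULL;
		}
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
		sk_wait_data(sk, &timeo);
	}
	return skb;
}
#endif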
1848
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001849/**
1850 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1851 * @sk: socket
1852 * @size: memory size to allocate
1853 * @kind: allocation type
1854 *
1855 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1856 * rmem allocation. This function assumes that protocols which have
1857 * memory_pressure use sk_wmem_queued as write buffer accounting.
1858 */
1859int __sk_mem_schedule(struct sock *sk, int size, int kind)
1860{
1861 struct proto *prot = sk->sk_prot;
1862 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001863 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001864 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001865
1866 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001867
Glauber Costae1aab162011-12-11 21:47:03 +00001868 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001869
1870 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001871 if (parent_status == UNDER_LIMIT &&
1872 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001873 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001874 return 1;
1875 }
1876
Glauber Costae1aab162011-12-11 21:47:03 +00001877 /* Under pressure. (we or our parents) */
1878 if ((parent_status > SOFT_LIMIT) ||
1879 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001880 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001881
Glauber Costae1aab162011-12-11 21:47:03 +00001882 /* Over hard limit (we or our parents) */
1883 if ((parent_status == OVER_LIMIT) ||
1884 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001885 goto suppress_allocation;
1886
1887 /* guarantee minimum buffer size under pressure */
1888 if (kind == SK_MEM_RECV) {
1889 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1890 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001891
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001892 } else { /* SK_MEM_SEND */
1893 if (sk->sk_type == SOCK_STREAM) {
1894 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1895 return 1;
1896 } else if (atomic_read(&sk->sk_wmem_alloc) <
1897 prot->sysctl_wmem[0])
1898 return 1;
1899 }
1900
Glauber Costa180d8cd2011-12-11 21:47:02 +00001901 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08001902 int alloc;
1903
Glauber Costa180d8cd2011-12-11 21:47:02 +00001904 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08001905 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001906 alloc = sk_sockets_allocated_read_positive(sk);
1907 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001908 sk_mem_pages(sk->sk_wmem_queued +
1909 atomic_read(&sk->sk_rmem_alloc) +
1910 sk->sk_forward_alloc))
1911 return 1;
1912 }
1913
1914suppress_allocation:
1915
1916 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1917 sk_stream_moderate_sndbuf(sk);
1918
1919 /* Fail only if socket is _under_ its sndbuf.
1920 * In this case we cannot block, so that we have to fail.
1921 */
1922 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1923 return 1;
1924 }
1925
Satoru Moriya3847ce32011-06-17 12:00:03 +00001926 trace_sock_exceed_buf_limit(sk, prot, allocated);
1927
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001928 /* Alas. Undo changes. */
1929 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001930
Glauber Costa0e90b312012-01-20 04:57:16 +00001931 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00001932
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001933 return 0;
1934}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001935EXPORT_SYMBOL(__sk_mem_schedule);
1936
1937/**
1938 * __sk_mem_reclaim - reclaim memory_allocated
1939 * @sk: socket
1940 */
1941void __sk_mem_reclaim(struct sock *sk)
1942{
Glauber Costa180d8cd2011-12-11 21:47:02 +00001943 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00001944 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001945 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1946
Glauber Costa180d8cd2011-12-11 21:47:02 +00001947 if (sk_under_memory_pressure(sk) &&
1948 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1949 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001950}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001951EXPORT_SYMBOL(__sk_mem_reclaim);
1952
1953
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954/*
1955 * Set of default routines for initialising struct proto_ops when
1956 * the protocol does not support a particular function. In certain
1957 * cases where it makes no sense for a protocol to have a "do nothing"
1958 * function, some default processing is provided.
1959 */
1960
1961int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1962{
1963 return -EOPNOTSUPP;
1964}
Eric Dumazet2a915252009-05-27 11:30:05 +00001965EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001967int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 int len, int flags)
1969{
1970 return -EOPNOTSUPP;
1971}
Eric Dumazet2a915252009-05-27 11:30:05 +00001972EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
1974int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1975{
1976 return -EOPNOTSUPP;
1977}
Eric Dumazet2a915252009-05-27 11:30:05 +00001978EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
1980int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1981{
1982 return -EOPNOTSUPP;
1983}
Eric Dumazet2a915252009-05-27 11:30:05 +00001984EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001986int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 int *len, int peer)
1988{
1989 return -EOPNOTSUPP;
1990}
Eric Dumazet2a915252009-05-27 11:30:05 +00001991EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Eric Dumazet2a915252009-05-27 11:30:05 +00001993unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994{
1995 return 0;
1996}
Eric Dumazet2a915252009-05-27 11:30:05 +00001997EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
1999int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2000{
2001 return -EOPNOTSUPP;
2002}
Eric Dumazet2a915252009-05-27 11:30:05 +00002003EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
2005int sock_no_listen(struct socket *sock, int backlog)
2006{
2007 return -EOPNOTSUPP;
2008}
Eric Dumazet2a915252009-05-27 11:30:05 +00002009EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
2011int sock_no_shutdown(struct socket *sock, int how)
2012{
2013 return -EOPNOTSUPP;
2014}
Eric Dumazet2a915252009-05-27 11:30:05 +00002015EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
2017int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002018 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019{
2020 return -EOPNOTSUPP;
2021}
Eric Dumazet2a915252009-05-27 11:30:05 +00002022EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
2024int sock_no_getsockopt(struct socket *sock, int level, int optname,
2025 char __user *optval, int __user *optlen)
2026{
2027 return -EOPNOTSUPP;
2028}
Eric Dumazet2a915252009-05-27 11:30:05 +00002029EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
2031int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2032 size_t len)
2033{
2034 return -EOPNOTSUPP;
2035}
Eric Dumazet2a915252009-05-27 11:30:05 +00002036EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
2038int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2039 size_t len, int flags)
2040{
2041 return -EOPNOTSUPP;
2042}
Eric Dumazet2a915252009-05-27 11:30:05 +00002043EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
2045int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2046{
2047 /* Mirror missing mmap method error code */
2048 return -ENODEV;
2049}
Eric Dumazet2a915252009-05-27 11:30:05 +00002050EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
2052ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2053{
2054 ssize_t res;
2055 struct msghdr msg = {.msg_flags = flags};
2056 struct kvec iov;
2057 char *kaddr = kmap(page);
2058 iov.iov_base = kaddr + offset;
2059 iov.iov_len = size;
2060 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2061 kunmap(page);
2062 return res;
2063}
Eric Dumazet2a915252009-05-27 11:30:05 +00002064EXPORT_SYMBOL(sock_no_sendpage);
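/*
 * Illustrative sketch, not part of this file: a hypothetical proto_ops that
 * plugs the sock_no_* defaults above into the slots it does not implement.
 * example_ops and the example_* handlers are made-up names; datagram_poll
 * and the sock_common_* helpers are the real generic implementations.
 */
#if 0
static const struct proto_ops example_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.release	= example_release,	/* hypothetical */
	.bind		= example_bind,		/* hypothetical */
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= example_getname,	/* hypothetical */
	.poll		= datagram_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= example_sendmsg,	/* hypothetical */
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
#endif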
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
2066/*
2067 * Default Socket Callbacks
2068 */
2069
2070static void sock_def_wakeup(struct sock *sk)
2071{
Eric Dumazet43815482010-04-29 11:01:49 +00002072 struct socket_wq *wq;
2073
2074 rcu_read_lock();
2075 wq = rcu_dereference(sk->sk_wq);
2076 if (wq_has_sleeper(wq))
2077 wake_up_interruptible_all(&wq->wait);
2078 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079}
2080
2081static void sock_def_error_report(struct sock *sk)
2082{
Eric Dumazet43815482010-04-29 11:01:49 +00002083 struct socket_wq *wq;
2084
2085 rcu_read_lock();
2086 wq = rcu_dereference(sk->sk_wq);
2087 if (wq_has_sleeper(wq))
2088 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002089 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002090 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091}
2092
2093static void sock_def_readable(struct sock *sk, int len)
2094{
Eric Dumazet43815482010-04-29 11:01:49 +00002095 struct socket_wq *wq;
2096
2097 rcu_read_lock();
2098 wq = rcu_dereference(sk->sk_wq);
2099 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002100 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002101 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002102 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002103 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104}
2105
2106static void sock_def_write_space(struct sock *sk)
2107{
Eric Dumazet43815482010-04-29 11:01:49 +00002108 struct socket_wq *wq;
2109
2110 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
2112 /* Do not wake up a writer until he can make "significant"
2113 * progress. --DaveM
2114 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002115 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002116 wq = rcu_dereference(sk->sk_wq);
2117 if (wq_has_sleeper(wq))
2118 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002119 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
2121 /* Should agree with poll, otherwise some programs break */
2122 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002123 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 }
2125
Eric Dumazet43815482010-04-29 11:01:49 +00002126 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127}
2128
2129static void sock_def_destruct(struct sock *sk)
2130{
Jesper Juhla51482b2005-11-08 09:41:34 -08002131 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132}
2133
2134void sk_send_sigurg(struct sock *sk)
2135{
2136 if (sk->sk_socket && sk->sk_socket->file)
2137 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002138 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139}
Eric Dumazet2a915252009-05-27 11:30:05 +00002140EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
2142void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2143 unsigned long expires)
2144{
2145 if (!mod_timer(timer, expires))
2146 sock_hold(sk);
2147}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148EXPORT_SYMBOL(sk_reset_timer);
2149
2150void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2151{
2152 if (timer_pending(timer) && del_timer(timer))
2153 __sock_put(sk);
2154}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155EXPORT_SYMBOL(sk_stop_timer);
2156
2157void sock_init_data(struct socket *sock, struct sock *sk)
2158{
2159 skb_queue_head_init(&sk->sk_receive_queue);
2160 skb_queue_head_init(&sk->sk_write_queue);
2161 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07002162#ifdef CONFIG_NET_DMA
2163 skb_queue_head_init(&sk->sk_async_wait_queue);
2164#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
2166 sk->sk_send_head = NULL;
2167
2168 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 sk->sk_allocation = GFP_KERNEL;
2171 sk->sk_rcvbuf = sysctl_rmem_default;
2172 sk->sk_sndbuf = sysctl_wmem_default;
2173 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002174 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175
2176 sock_set_flag(sk, SOCK_ZAPPED);
2177
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002178 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002180 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 sock->sk = sk;
2182 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002183 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
Eric Dumazetb6c67122010-04-08 23:03:29 +00002185 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002187 lockdep_set_class_and_name(&sk->sk_callback_lock,
2188 af_callback_keys + sk->sk_family,
2189 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
2191 sk->sk_state_change = sock_def_wakeup;
2192 sk->sk_data_ready = sock_def_readable;
2193 sk->sk_write_space = sock_def_write_space;
2194 sk->sk_error_report = sock_def_error_report;
2195 sk->sk_destruct = sock_def_destruct;
2196
Eric Dumazet5640f762012-09-23 23:04:42 +00002197 sk->sk_frag.page = NULL;
2198 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002199 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002201 sk->sk_peer_pid = NULL;
2202 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 sk->sk_write_pending = 0;
2204 sk->sk_rcvlowat = 1;
2205 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2206 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2207
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002208 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002210 /*
2211 * Before updating sk_refcnt, we must commit prior changes to memory
2212 * (Documentation/RCU/rculist_nulls.txt for details)
2213 */
2214 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002216 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217}
Eric Dumazet2a915252009-05-27 11:30:05 +00002218EXPORT_SYMBOL(sock_init_data);
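/*
 * Illustrative sketch, not part of this file: after sock_init_data() installs
 * the sock_def_* callbacks above, a protocol may override the ones it cares
 * about. example_sock_init and the example_* callbacks are hypothetical.
 */
#if 0
static void example_sock_init(struct socket *sock, struct sock *sk)
{
	sock_init_data(sock, sk);
	sk->sk_data_ready	= example_data_ready;	/* hypothetical */
	sk->sk_write_space	= example_write_space;	/* hypothetical */
	sk->sk_allocation	= GFP_ATOMIC;		/* e.g. for softirq users */
}
#endif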
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002220void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221{
2222 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002223 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002224 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002226 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002227 spin_unlock(&sk->sk_lock.slock);
2228 /*
2229 * The sk_lock has mutex_lock() semantics here:
2230 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002231 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002232 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002234EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002236void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002238 /*
2239 * The sk_lock has mutex_unlock() semantics:
2240 */
2241 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2242
2243 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 if (sk->sk_backlog.tail)
2245 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002246
2247 if (sk->sk_prot->release_cb)
2248 sk->sk_prot->release_cb(sk);
2249
John Heffnerd2e91172007-09-12 10:44:19 +02002250 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002251 if (waitqueue_active(&sk->sk_lock.wq))
2252 wake_up(&sk->sk_lock.wq);
2253 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254}
2255EXPORT_SYMBOL(release_sock);
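/*
 * Illustrative sketch, not part of this file: a hypothetical process-context
 * user of lock_sock()/release_sock(). While the lock is owned, softirq input
 * is queued on the backlog, which release_sock() drains via __release_sock().
 */
#if 0
static int example_setsockopt_locked(struct sock *sk, int val)
{
	int err = 0;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE)
		err = -EINVAL;
	else
		sk->sk_priority = val;
	release_sock(sk);
	return err;
}
#endif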
2256
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002257/**
2258 * lock_sock_fast - fast version of lock_sock
2259 * @sk: socket
2260 *
2261 * This version should be used for very small sections, where the process won't block.
2262 * return false if fast path is taken
2263 * sk_lock.slock locked, owned = 0, BH disabled
2264 * return true if slow path is taken
2265 * sk_lock.slock unlocked, owned = 1, BH enabled
2266 */
2267bool lock_sock_fast(struct sock *sk)
2268{
2269 might_sleep();
2270 spin_lock_bh(&sk->sk_lock.slock);
2271
2272 if (!sk->sk_lock.owned)
2273 /*
2274 * Note : We must disable BH
2275 */
2276 return false;
2277
2278 __lock_sock(sk);
2279 sk->sk_lock.owned = 1;
2280 spin_unlock(&sk->sk_lock.slock);
2281 /*
2282 * The sk_lock has mutex_lock() semantics here:
2283 */
2284 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2285 local_bh_enable();
2286 return true;
2287}
2288EXPORT_SYMBOL(lock_sock_fast);
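/*
 * Illustrative sketch, not part of this file: a hypothetical short critical
 * section using lock_sock_fast(). unlock_sock_fast() (include/net/sock.h)
 * takes the returned value so it can undo whichever path was taken.
 */
#if 0
static int example_peek_rcvbuf(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	int rmem = atomic_read(&sk->sk_rmem_alloc);

	unlock_sock_fast(sk, slow);
	return rmem;
}
#endif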
2289
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002291{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002292 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002294 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002295 tv = ktime_to_timeval(sk->sk_stamp);
2296 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002298 if (tv.tv_sec == 0) {
2299 sk->sk_stamp = ktime_get_real();
2300 tv = ktime_to_timeval(sk->sk_stamp);
2301 }
2302 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002303}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304EXPORT_SYMBOL(sock_get_timestamp);
2305
Eric Dumazetae40eb12007-03-18 17:33:16 -07002306int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2307{
2308 struct timespec ts;
2309 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002310 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002311 ts = ktime_to_timespec(sk->sk_stamp);
2312 if (ts.tv_sec == -1)
2313 return -ENOENT;
2314 if (ts.tv_sec == 0) {
2315 sk->sk_stamp = ktime_get_real();
2316 ts = ktime_to_timespec(sk->sk_stamp);
2317 }
2318 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2319}
2320EXPORT_SYMBOL(sock_get_timestampns);
2321
Patrick Ohly20d49472009-02-12 05:03:38 +00002322void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002323{
Patrick Ohly20d49472009-02-12 05:03:38 +00002324 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002325 unsigned long previous_flags = sk->sk_flags;
2326
Patrick Ohly20d49472009-02-12 05:03:38 +00002327 sock_set_flag(sk, flag);
2328 /*
2329 * we just set one of the two flags which require net
2330 * time stamping, but time stamping might have been on
2331 * already because of the other one
2332 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002333 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002334 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 }
2336}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337
2338/*
2339 * Get a socket option on a socket.
2340 *
2341 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2342 * asynchronous errors should be reported by getsockopt. We assume
2343 * this means if you specify SO_ERROR (otherwise what's the point of it).
2344 */
2345int sock_common_getsockopt(struct socket *sock, int level, int optname,
2346 char __user *optval, int __user *optlen)
2347{
2348 struct sock *sk = sock->sk;
2349
2350 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2351}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352EXPORT_SYMBOL(sock_common_getsockopt);
2353
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002354#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002355int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2356 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002357{
2358 struct sock *sk = sock->sk;
2359
Johannes Berg1e51f952007-03-06 13:44:06 -08002360 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002361 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2362 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002363 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2364}
2365EXPORT_SYMBOL(compat_sock_common_getsockopt);
2366#endif
2367
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2369 struct msghdr *msg, size_t size, int flags)
2370{
2371 struct sock *sk = sock->sk;
2372 int addr_len = 0;
2373 int err;
2374
2375 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2376 flags & ~MSG_DONTWAIT, &addr_len);
2377 if (err >= 0)
2378 msg->msg_namelen = addr_len;
2379 return err;
2380}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381EXPORT_SYMBOL(sock_common_recvmsg);
2382
2383/*
2384 * Set socket options on an inet socket.
2385 */
2386int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002387 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388{
2389 struct sock *sk = sock->sk;
2390
2391 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2392}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393EXPORT_SYMBOL(sock_common_setsockopt);
2394
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002395#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002396int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002397 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002398{
2399 struct sock *sk = sock->sk;
2400
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002401 if (sk->sk_prot->compat_setsockopt != NULL)
2402 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2403 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002404 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2405}
2406EXPORT_SYMBOL(compat_sock_common_setsockopt);
2407#endif
2408
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409void sk_common_release(struct sock *sk)
2410{
2411 if (sk->sk_prot->destroy)
2412 sk->sk_prot->destroy(sk);
2413
2414 /*
2415	 * Observation: when sock_common_release is called, processes have
2416	 * no access to the socket, but the networking stack still does.
2417 * Step one, detach it from networking:
2418 *
2419 * A. Remove from hash tables.
2420 */
2421
2422 sk->sk_prot->unhash(sk);
2423
2424 /*
2425	 * At this point the socket cannot receive new packets, but it is possible
2426	 * that some packets are in flight, because some CPU is running the receiver
2427	 * and did a hash table lookup before we unhashed the socket. They will reach
2428	 * the receive queue and will be purged by the socket destructor.
2429 *
2430	 * Also we still have packets pending on the receive queue and probably
2431	 * our own packets waiting in device queues. sock_destroy will drain the
2432	 * receive queue, but transmitted packets will delay socket destruction
2433	 * until the last reference is released.
2434 */
2435
2436 sock_orphan(sk);
2437
2438 xfrm_sk_free_policy(sk);
2439
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002440 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00002441
2442 if (sk->sk_frag.page) {
2443 put_page(sk->sk_frag.page);
2444 sk->sk_frag.page = NULL;
2445 }
2446
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 sock_put(sk);
2448}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449EXPORT_SYMBOL(sk_common_release);
2450
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002451#ifdef CONFIG_PROC_FS
2452#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002453struct prot_inuse {
2454 int val[PROTO_INUSE_NR];
2455};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002456
2457static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002458
2459#ifdef CONFIG_NET_NS
2460void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2461{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002462 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002463}
2464EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2465
2466int sock_prot_inuse_get(struct net *net, struct proto *prot)
2467{
2468 int cpu, idx = prot->inuse_idx;
2469 int res = 0;
2470
2471 for_each_possible_cpu(cpu)
2472 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2473
2474 return res >= 0 ? res : 0;
2475}
2476EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2477
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002478static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002479{
2480 net->core.inuse = alloc_percpu(struct prot_inuse);
2481 return net->core.inuse ? 0 : -ENOMEM;
2482}
2483
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002484static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002485{
2486 free_percpu(net->core.inuse);
2487}
2488
2489static struct pernet_operations net_inuse_ops = {
2490 .init = sock_inuse_init_net,
2491 .exit = sock_inuse_exit_net,
2492};
2493
2494static __init int net_inuse_init(void)
2495{
2496 if (register_pernet_subsys(&net_inuse_ops))
2497 panic("Cannot initialize net inuse counters");
2498
2499 return 0;
2500}
2501
2502core_initcall(net_inuse_init);
2503#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002504static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2505
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002506void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002507{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002508 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002509}
2510EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2511
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002512int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002513{
2514 int cpu, idx = prot->inuse_idx;
2515 int res = 0;
2516
2517 for_each_possible_cpu(cpu)
2518 res += per_cpu(prot_inuse, cpu).val[idx];
2519
2520 return res >= 0 ? res : 0;
2521}
2522EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002523#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002524
2525static void assign_proto_idx(struct proto *prot)
2526{
2527 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2528
2529 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002530 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002531 return;
2532 }
2533
2534 set_bit(prot->inuse_idx, proto_inuse_idx);
2535}
2536
2537static void release_proto_idx(struct proto *prot)
2538{
2539 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2540 clear_bit(prot->inuse_idx, proto_inuse_idx);
2541}
2542#else
2543static inline void assign_proto_idx(struct proto *prot)
2544{
2545}
2546
2547static inline void release_proto_idx(struct proto *prot)
2548{
2549}
2550#endif
2551
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
						prot->rsk_prot->obj_size, 0,
						SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

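/*
 * proto_unregister - reverse of proto_register: drop the protocol from
 * proto_list, release its inuse index and destroy any slab caches that
 * proto_register created for it.
 */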
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
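/*
 * /proc/net/protocols: the seq_file iterator below walks proto_list
 * with proto_list_mutex held from ->start to ->stop.
 */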
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

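/*
 * Emit one row of /proc/net/protocols: name, object size, socket count,
 * memory accounting and pressure state, slab/module information, then a
 * y/n flag for each optional method in struct proto.
 */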
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

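/* seq_file plumbing for /proc/net/protocols. */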
static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

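/*
 * Create and remove the per-namespace /proc/net/protocols entry; the
 * pernet subsystem is registered at subsys_initcall time.
 */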
static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */