/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and whether the
 * current process has it as well.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the global capability @cap
 * when the socket was created, and whether the current process has it
 * as well.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap over
 * the network namespace the socket is a member of when the socket was
 * created, and whether the current process has it as well.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

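/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a protocol handler gating a privileged option would typically combine
 * these helpers with an errno return, e.g.
 *
 *	static int example_set_privileged_opt(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		sk->sk_priority = val;
 *		return 0;
 *	}
 *
 * Both the opener's credentials and the current task are checked, so a
 * privileged socket handed to an unprivileged process stops conferring
 * the capability.
 */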

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif
230
Ingo Molnarda21f242006-07-03 00:25:12 -0700231/*
232 * Each address family might have different locking rules, so we have
233 * one slock key per address family:
234 */
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700235static struct lock_class_key af_family_keys[AF_MAX];
236static struct lock_class_key af_family_slock_keys[AF_MAX];
237
stephen hemmingercbda4ea2013-02-22 07:59:10 +0000238#if defined(CONFIG_MEMCG_KMEM)
Ingo Molnarc5905af2012-02-24 08:31:31 +0100239struct static_key memcg_socket_limit_enabled;
Glauber Costae1aab162011-12-11 21:47:03 +0000240EXPORT_SYMBOL(memcg_socket_limit_enabled);
stephen hemmingercbda4ea2013-02-22 07:59:10 +0000241#endif
Glauber Costae1aab162011-12-11 21:47:03 +0000242
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700243/*
244 * Make lock validator output more readable. (we pre-construct these
245 * strings build-time, so that runtime initialization of socket
246 * locks is fast):
247 */
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700248static const char *const af_family_key_strings[AF_MAX+1] = {
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700249 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
250 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
251 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
252 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
253 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
254 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
255 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800256 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700257 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800258 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700259 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700260 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800261 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000262 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700263};
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700264static const char *const af_family_slock_key_strings[AF_MAX+1] = {
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700265 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
266 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
267 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
268 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
269 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
270 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
271 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800272 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700273 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800274 "slock-27" , "slock-28" , "slock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700275 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700276 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800277 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000278 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700279};
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700280static const char *const af_family_clock_key_strings[AF_MAX+1] = {
Peter Zijlstra443aef02007-07-19 01:49:00 -0700281 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
282 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
283 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
284 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
285 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
286 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
287 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800288 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
Peter Zijlstra443aef02007-07-19 01:49:00 -0700289 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
Oliver Hartkoppb4942af2008-07-23 14:06:04 -0700290 "clock-27" , "clock-28" , "clock-AF_CAN" ,
David Howellse51f8022007-07-21 19:30:16 -0700291 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700292 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800293 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000294 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
Peter Zijlstra443aef02007-07-19 01:49:00 -0700295};
Ingo Molnarda21f242006-07-03 00:25:12 -0700296
297/*
298 * sk_callback_lock locking rules are per-address-family,
299 * so split the lock classes by using a per-AF key:
300 */
301static struct lock_class_key af_callback_keys[AF_MAX];
302
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303/* Take into consideration the size of the struct sk_buff overhead in the
304 * determination of these values, since that is non-constant across
305 * platforms. This makes socket queueing behavior and performance
306 * not depend upon such differences.
307 */
308#define _SK_MEM_PACKETS 256
Eric Dumazet87fb4b72011-10-13 07:28:54 +0000309#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
311#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
312
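/*
 * Rough worked example (the struct sizes are illustrative; they vary by
 * arch and config): with sizeof(struct sk_buff) around 240 bytes and
 * sizeof(struct skb_shared_info) around 320 bytes, both cacheline
 * aligned by SKB_TRUESIZE(),
 *
 *	SKB_TRUESIZE(256) ~ 256 + 256 + 320 = 832 bytes
 *	SK_WMEM_MAX ~ 832 * 256 = ~208 KB per socket by default
 *
 * i.e. the defaults budget for 256 small packets of true (metadata
 * included) size, rather than 256 * 256 bytes of raw payload.
 */
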
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down, but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

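/*
 * Usage sketch (hypothetical caller, not from this file): a
 * swap-over-network backend marks its transport socket so allocations
 * on the reclaim path may dip into the emergency reserves, and clears
 * the flag again at teardown:
 *
 *	sk_set_memalloc(swap_transport_sk);	// hypothetical socket
 *	...					// usable under memory pressure
 *	sk_clear_memalloc(swap_transport_sk);
 *
 * The static key keeps the SOCK_MEMALLOC checks on the hot receive path
 * free when no such socket exists.
 */
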
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

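/*
 * Worked example of the conversion above (HZ = 1000 assumed for the
 * arithmetic): a user timeout of { tv_sec = 1, tv_usec = 500000 } gives
 *
 *	*timeo_p = 1 * 1000 + (500000 + 999) / 1000 = 1500 jiffies
 *
 * i.e. microseconds are rounded up to the next jiffy, and a timeout of
 * { 0, 0 } selects MAX_SCHEDULE_TIMEOUT (block forever).
 */
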
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* We escape from the RCU-protected region here, so make sure we
	 * don't leak an unrefcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

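/*
 * Usage sketch (hypothetical datagram protocol, not from this file):
 * the protocol's delivery function hands a fully built skb to the
 * socket and keeps ownership only on failure:
 *
 *	static int example_proto_deliver(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int rc = sock_queue_rcv_skb(sk, skb);
 *
 *		if (rc < 0) {
 *			kfree_skb(skb);	// queueing failed, skb still ours
 *			return rc;
 *		}
 *		return 0;		// skb now owned by the receive queue
 *	}
 */
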
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

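/*
 * Usage sketch (hypothetical tunnel driver, not from this file): the
 * caller holds a socket reference that sk_receive_skb() consumes via
 * sock_put() above, and passes nested == 1 when it may already hold
 * another socket lock of the same lockdep class (e.g. the outer tunnel
 * socket while delivering to the inner one):
 *
 *	sock_hold(inner_sk);
 *	ret = sk_receive_skb(inner_sk, skb, 1);	// reference consumed
 */
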
void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

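/*
 * Usage sketch (hypothetical output path, not from this file): callers
 * revalidate the cached route before each transmit and fall back to a
 * fresh lookup when the cache has been invalidated:
 *
 *	struct dst_entry *dst = sk_dst_check(sk, 0);
 *
 *	if (!dst) {
 *		dst = example_route_lookup(sk);	// hypothetical helper
 *		if (!dst)
 *			return -EHOSTUNREACH;
 *		sk_dst_set(sk, dst);
 *	}
 *
 * The two variants differ only in locking context: __sk_dst_check()
 * expects the socket lock to be held, while sk_dst_check() takes its
 * own reference on the dst.
 */
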
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

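/*
 * Userspace view (illustrative snippet, not part of the kernel build):
 * binding a socket to one interface so traffic ignores the routing
 * table's choice of egress device. Requires CAP_NET_RAW, as enforced
 * above:
 *
 *	char ifname[] = "eth0";
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, strlen(ifname)) < 0)
 *		perror("SO_BINDTODEVICE");
 *
 * Passing an empty name (or zero length) unbinds the socket again.
 */
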
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 * Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't return an error on this; BSD doesn't, and if you
		 * think about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't return an error on this; BSD doesn't, and if you
		 * think about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	/* We implement the SO_SNDLOWAT etc to
	   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);

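/*
 * Userspace view of the SO_RCVBUF doubling above (illustrative snippet,
 * not part of the kernel build): the value read back is twice the value
 * set, capped by sysctl_rmem_max, because the kernel budgets for
 * struct sk_buff overhead on top of payload:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out == 131072, assuming 65536 <= sysctl_rmem_max
 */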

void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

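/*
 * Userspace view of the SO_PEERCRED branch above (illustrative snippet,
 * not part of the kernel build): on a connected AF_UNIX socket the peer
 * process's pid/uid/gid are reported, translated into the caller's
 * namespaces by cred_to_ucred():
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("peer pid=%d uid=%d gid=%d\n",
 *		       peer.pid, peer.uid, peer.gid);
 */
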
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	classid = task_cls_classid(current);
	if (classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

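/*
 * Usage sketch (illustrative, not part of this file): a protocol's
 * ->create() handler typically pairs sk_alloc() with sock_init_data(),
 * e.g. for an IPv4 stream socket:
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &tcp_prot);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	...
 *	sk_free(sk);		(on a later error path)
 */
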
static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc so we can tell whether
	 * some packets are still in some tx queue.
	 * If the count is not zero, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop the reference to sk->sk_net. It has
 * already been dropped in sk_change_net. Taking a reference to the
 * stopping namespace is not an option.
 * Take a reference on the socket to remove it from the hash while it is
 * still _alive_, and after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and make a plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always incremented the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

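/*
 * Pairing sketch (illustrative): sock_wfree() is not called directly.
 * A sender charges an skb to the socket with skb_set_owner_w(), which
 * sets skb->destructor = sock_wfree and adds skb->truesize to
 * sk_wmem_alloc; the destructor above then runs when the skb is freed:
 *
 *	skb = alloc_skb(len, GFP_KERNEL);
 *	skb_set_owner_w(skb, sk);	(sk_wmem_alloc += skb->truesize)
 *	...
 *	kfree_skb(skb);			(-> sock_wfree(skb))
 */
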
/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

#ifdef CONFIG_INET
	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
#endif
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);

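/*
 * Usage sketch (hypothetical caller, not from this file): option memory
 * must be released with the same length it was charged with, e.g. in a
 * setsockopt() handler:
 *
 *	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOBUFS;
 *	if (copy_from_user(buf, optval, len)) {
 *		sock_kfree_s(sk, buf, len);
 *		return -EFAULT;
 *	}
 *	...
 *	sock_kfree_s(sk, buf, len);
 */
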
/* This is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					__skb_fill_page_desc(skb, i,
							page, 0,
							(data_len >= PAGE_SIZE ?
							 PAGE_SIZE :
							 data_len));
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

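/*
 * Usage sketch (illustrative): a datagram sendmsg() implementation
 * usually blocks here until sndbuf space is available:
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out_err;
 *
 * On failure, err holds -EAGAIN (non-blocking and no space), -EPIPE
 * (after SEND_SHUTDOWN), a pending socket error, or the restart code
 * from sock_intr_errno() if a signal interrupted the wait.
 */
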
/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	int order;

	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset < pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	/* We restrict high order allocations to users that can afford to wait */
	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;

	do {
		gfp_t gfp = sk->sk_allocation;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
		pfrag->page = alloc_pages(gfp, order);
		if (likely(pfrag->page)) {
			pfrag->offset = 0;
			pfrag->size = PAGE_SIZE << order;
			return true;
		}
	} while (--order >= 0);

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);

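/*
 * Usage sketch (illustrative): callers take the per-socket fragment via
 * sk_page_frag() and advance the offset themselves after copying:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, len, pfrag->size - pfrag->offset);
 *	... copy 'copy' bytes into pfrag->page at pfrag->offset ...
 *	pfrag->offset += copy;
 */
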
static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);

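/*
 * Usage sketch (illustrative): receive paths call this with the socket
 * lock held; sk_wait_event() drops and retakes the lock around the
 * sleep, so new data can reach sk_receive_queue while we wait:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		...
 *		sk_wait_data(sk, &timeo);
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *	}
 */
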
/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
			allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
	    allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
	    (allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

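/*
 * Usage sketch (illustrative): protocols normally go through the
 * sk_wmem_schedule()/sk_rmem_schedule() wrappers, which fall back to
 * this slow path only when sk_forward_alloc is exhausted, e.g. when
 * queueing received data:
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		goto drop;
 *	skb_set_owner_r(skb, sk);	(charges sk_rmem_alloc and
 *					 consumes sk_forward_alloc)
 */
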
/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

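/*
 * Wiring sketch (hypothetical protocol, not from this file): an address
 * family plugs these stubs into the proto_ops slots it does not
 * implement:
 *
 *	static const struct proto_ops example_ops = {
 *		.family   = PF_EXAMPLE,		(hypothetical)
 *		.owner    = THIS_MODULE,
 *		.bind     = example_bind,	(hypothetical)
 *		.accept   = sock_no_accept,
 *		.mmap     = sock_no_mmap,
 *		.sendpage = sock_no_sendpage,
 *		...
 *	};
 */
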
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress. --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	sk->sk_pacing_rate = ~0U;
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

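/*
 * Usage sketch (illustrative): after sock_init_data() a protocol may
 * override the default callbacks installed above, e.g.
 *
 *	sock_init_data(sock, sk);
 *	sk->sk_data_ready  = example_data_ready;	(hypothetical)
 *	sk->sk_write_space = example_write_space;	(hypothetical)
 *	sk->sk_destruct    = example_destruct;		(hypothetical)
 */
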
void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning : release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path is taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path is taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);

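/*
 * Usage sketch (illustrative): the return value must be handed back to
 * unlock_sock_fast() so it can undo whichever path was taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 */
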
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002568#ifdef CONFIG_PROC_FS
2569#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002570struct prot_inuse {
2571 int val[PROTO_INUSE_NR];
2572};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002573
2574static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002575
#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

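/*
 * Slot management for proto_inuse_idx: the last slot doubles as an
 * overflow index.  Once the bitmap fills up, newly registered protocols
 * all share that final slot, and release_proto_idx() deliberately never
 * clears it.
 */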
static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

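/**
 * proto_register - register a transport protocol with the socket layer
 * @prot: protocol descriptor to add to proto_list
 * @alloc_slab: when non-zero, create kmem caches for the protocol's
 *	sockets and, if present, its request_sock and timewait_sock objects
 *
 * Returns 0 on success and -ENOBUFS if any of the caches could not be
 * created; partially created caches are cleaned up on failure.
 */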
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

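/**
 * proto_unregister - remove a protocol from the socket layer
 * @prot: protocol descriptor previously registered with proto_register()
 *
 * Releases the protocol's inuse slot, unlinks it from proto_list and
 * destroys any kmem caches created at registration time.
 */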
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
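
/*
 * Usage sketch (illustrative; "foo" and struct foo_sock are hypothetical):
 * a protocol module typically registers in its init path and unregisters
 * on exit, e.g.
 *
 *	static struct proto foo_prot = {
 *		.name     = "FOO",
 *		.owner    = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return proto_register(&foo_prot, 1);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		proto_unregister(&foo_prot);
 *	}
 */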

#ifdef CONFIG_PROC_FS
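/*
 * /proc/net/protocols: one line of statistics and implemented-method
 * flags per registered protocol, iterated under proto_list_mutex by the
 * seq_file handlers below.
 */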
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}
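
/* "NI" in the pressure column means the protocol does not implement it. */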

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

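/*
 * The abbreviated column headings below ("cl co di ac io in de sh ss gs
 * se re sp bi br ha uh gp em") track, in order, the proto methods that
 * proto_seq_printf() reports, from ->close() through
 * ->enter_memory_pressure().
 */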
static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */