/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and that the current
 * process has it as well.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in all
 * user namespaces when the socket was created, and that the current process
 * has it as well.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);
180
181/**
182 * sk_net_capable - Network namespace socket capability test
183 * @sk: Socket to use a capability on or through
184 * @cap: The capability to use
185 *
Masanari Iidae793c0f2014-09-04 23:44:36 +0900186 * Test to see if the opener of the socket had when the socket was created
Eric W. Biedermana3b299d2014-04-23 14:26:56 -0700187 * and the current process has the capability @cap over the network namespace
188 * the socket is a member of.
189 */
190bool sk_net_capable(const struct sock *sk, int cap)
191{
192 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
193}
194EXPORT_SYMBOL(sk_net_capable);
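
/*
 * Illustrative sketch, not part of the original file: how a protocol's
 * option handler might use sk_net_capable() to gate a privileged setting.
 * The handler name and the option it stands in for are hypothetical; only
 * the call pattern is the point.
 *
 *	static int example_set_priv_option(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		sk->sk_priority = val;
 *		return 0;
 *	}
 */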


#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif
233
Ingo Molnarda21f242006-07-03 00:25:12 -0700234/*
235 * Each address family might have different locking rules, so we have
236 * one slock key per address family:
237 */
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700238static struct lock_class_key af_family_keys[AF_MAX];
239static struct lock_class_key af_family_slock_keys[AF_MAX];
240
stephen hemmingercbda4ea2013-02-22 07:59:10 +0000241#if defined(CONFIG_MEMCG_KMEM)
Ingo Molnarc5905af2012-02-24 08:31:31 +0100242struct static_key memcg_socket_limit_enabled;
Glauber Costae1aab162011-12-11 21:47:03 +0000243EXPORT_SYMBOL(memcg_socket_limit_enabled);
stephen hemmingercbda4ea2013-02-22 07:59:10 +0000244#endif
Glauber Costae1aab162011-12-11 21:47:03 +0000245
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700246/*
247 * Make lock validator output more readable. (we pre-construct these
248 * strings build-time, so that runtime initialization of socket
249 * locks is fast):
250 */
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700251static const char *const af_family_key_strings[AF_MAX+1] = {
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700252 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
253 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
254 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
255 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
256 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
257 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
258 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800259 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700260 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800261 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700262 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700263 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800264 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000265 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700266};
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700267static const char *const af_family_slock_key_strings[AF_MAX+1] = {
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700268 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
269 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
270 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
271 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
272 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
273 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
274 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800275 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700276 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800277 "slock-27" , "slock-28" , "slock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700278 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700279 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800280 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000281 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700282};
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700283static const char *const af_family_clock_key_strings[AF_MAX+1] = {
Peter Zijlstra443aef02007-07-19 01:49:00 -0700284 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
285 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
286 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
287 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
288 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
289 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
290 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800291 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
Peter Zijlstra443aef02007-07-19 01:49:00 -0700292 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
Oliver Hartkoppb4942af2008-07-23 14:06:04 -0700293 "clock-27" , "clock-28" , "clock-AF_CAN" ,
David Howellse51f8022007-07-21 19:30:16 -0700294 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700295 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800296 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000297 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
Peter Zijlstra443aef02007-07-19 01:49:00 -0700298};
Ingo Molnarda21f242006-07-03 00:25:12 -0700299
300/*
301 * sk_callback_lock locking rules are per-address-family,
302 * so split the lock classes by using a per-AF key:
303 */
304static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
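
/*
 * Illustrative sketch, not part of the original file: a protocol that uses a
 * socket for memory reclaim (swap over a network device, for instance) would
 * mark the socket with sk_set_memalloc() for the lifetime of that use, so its
 * allocations may dip into the emergency reserves, and clear the flag again
 * when the socket is torn down. Both functions below are hypothetical.
 *
 *	static void example_attach_reclaim_socket(struct sock *sk)
 *	{
 *		sk_set_memalloc(sk);
 *	}
 *
 *	static void example_detach_reclaim_socket(struct sock *sk)
 *	{
 *		sk_clear_memalloc(sk);
 *	}
 */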

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);
382
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
384{
385 struct timeval tv;
386
387 if (optlen < sizeof(tv))
388 return -EINVAL;
389 if (copy_from_user(&tv, optval, sizeof(tv)))
390 return -EFAULT;
Vasily Averinba780732007-05-24 16:58:54 -0700391 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
392 return -EDOM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393
Vasily Averinba780732007-05-24 16:58:54 -0700394 if (tv.tv_sec < 0) {
Andrew Morton6f11df82007-07-09 13:16:00 -0700395 static int warned __read_mostly;
396
Vasily Averinba780732007-05-24 16:58:54 -0700397 *timeo_p = 0;
Ilpo Järvinen50aab542008-05-02 16:20:10 -0700398 if (warned < 10 && net_ratelimit()) {
Vasily Averinba780732007-05-24 16:58:54 -0700399 warned++;
Joe Perchese005d192012-05-16 19:58:40 +0000400 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
401 __func__, current->comm, task_pid_nr(current));
Ilpo Järvinen50aab542008-05-02 16:20:10 -0700402 }
Vasily Averinba780732007-05-24 16:58:54 -0700403 return 0;
404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 *timeo_p = MAX_SCHEDULE_TIMEOUT;
406 if (tv.tv_sec == 0 && tv.tv_usec == 0)
407 return 0;
408 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
409 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
410 return 0;
411}
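
/*
 * Illustrative sketch, not part of the original file: from user space the
 * timeouts parsed above are set with SO_RCVTIMEO/SO_SNDTIMEO and a struct
 * timeval; sock_set_timeout() converts that value to jiffies. Assumes a
 * userspace build with <sys/socket.h> and an already created socket fd.
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt(SO_RCVTIMEO)");
 */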

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the rcu protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
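
/*
 * Illustrative sketch, not part of the original file: a datagram protocol's
 * receive path typically hands a fully built skb to sock_queue_rcv_skb() and
 * frees the skb itself on failure, since the helper only takes ownership on
 * success. The function below is hypothetical.
 *
 *	static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *
 *		if (err < 0)
 *			kfree_skb(skb);
 *		return err;
 *	}
 */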

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
547
Brian Haleyc91f6df2012-11-26 05:21:08 +0000548static int sock_setbindtodevice(struct sock *sk, char __user *optval,
549 int optlen)
David S. Miller48788092007-09-14 16:41:03 -0700550{
551 int ret = -ENOPROTOOPT;
552#ifdef CONFIG_NETDEVICES
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +0900553 struct net *net = sock_net(sk);
David S. Miller48788092007-09-14 16:41:03 -0700554 char devname[IFNAMSIZ];
555 int index;
556
557 /* Sorry... */
558 ret = -EPERM;
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000559 if (!ns_capable(net->user_ns, CAP_NET_RAW))
David S. Miller48788092007-09-14 16:41:03 -0700560 goto out;
561
562 ret = -EINVAL;
563 if (optlen < 0)
564 goto out;
565
566 /* Bind this socket to a particular device like "eth0",
567 * as specified in the passed interface name. If the
568 * name is "" or the option length is zero the socket
569 * is not bound.
570 */
571 if (optlen > IFNAMSIZ - 1)
572 optlen = IFNAMSIZ - 1;
573 memset(devname, 0, sizeof(devname));
574
575 ret = -EFAULT;
576 if (copy_from_user(devname, optval, optlen))
577 goto out;
578
David S. Miller000ba2e2009-11-05 22:37:11 -0800579 index = 0;
580 if (devname[0] != '\0') {
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800581 struct net_device *dev;
David S. Miller48788092007-09-14 16:41:03 -0700582
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800583 rcu_read_lock();
584 dev = dev_get_by_name_rcu(net, devname);
585 if (dev)
586 index = dev->ifindex;
587 rcu_read_unlock();
David S. Miller48788092007-09-14 16:41:03 -0700588 ret = -ENODEV;
589 if (!dev)
590 goto out;
David S. Miller48788092007-09-14 16:41:03 -0700591 }
592
593 lock_sock(sk);
594 sk->sk_bound_dev_if = index;
595 sk_dst_reset(sk);
596 release_sock(sk);
597
598 ret = 0;
599
600out:
601#endif
602
603 return ret;
604}
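
/*
 * Illustrative sketch, not part of the original file: the handler above is
 * what runs when user space binds a socket to an interface by name. Assumes
 * a userspace build with <sys/socket.h>, <net/if.h>, <string.h>, and a
 * caller holding CAP_NET_RAW.
 *
 *	const char ifname[IFNAMSIZ] = "eth0";
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, strlen(ifname)) < 0)
 *		perror("setsockopt(SO_BINDTODEVICE)");
 */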

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP) {
				if (sk->sk_state != TCP_ESTABLISHED) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
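
/*
 * Illustrative sketch, not part of the original file: the SO_RCVBUF/SO_SNDBUF
 * doubling above is visible from user space, where getsockopt() reports
 * roughly twice the requested value (clamped by sysctl_rmem_max and
 * SOCK_MIN_RCVBUF). Assumes a userspace build with <sys/socket.h> and
 * <stdio.h> and an existing socket fd.
 *
 *	int req = 65536, eff = 0;
 *	socklen_t len = sizeof(eff);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &eff, &len);
 *	printf("asked for %d, kernel reserved %d\n", req, eff);
 */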


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
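
/*
 * Illustrative sketch, not part of the original file: the SO_PEERCRED branch
 * above fills a struct ucred that user space can query for the peer of a
 * connected AF_UNIX socket. Assumes a userspace build with _GNU_SOURCE,
 * <sys/socket.h> and <stdio.h>, and a connected AF_UNIX socket fd.
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("peer pid=%d uid=%d gid=%d\n",
 *		       peer.pid, peer.uid, peer.gid);
 */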

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
1375 */
1376 sk->sk_prot = sk->sk_prot_creator = prot;
1377 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001378 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001379 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001380
Zefan Li211d2f972013-04-08 20:03:35 +00001381 sock_update_classid(sk);
Zefan Li6ffd4642013-04-08 20:03:47 +00001382 sock_update_netprioidx(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 }
Frank Filza79af592005-09-27 15:23:38 -07001384
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001385 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386}
Eric Dumazet2a915252009-05-27 11:30:05 +00001387EXPORT_SYMBOL(sk_alloc);
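/*
 * Illustrative sketch, not part of this file: a protocol family's create
 * handler typically pairs sk_alloc() with sock_init_data() (defined later
 * in this file). "example_create" and the explicit my_proto argument are
 * hypothetical; real handlers use their own struct proto.
 */
static int example_create(struct net *net, struct socket *sock,
			  struct proto *my_proto)
{
	struct sock *sk = sk_alloc(net, PF_INET, GFP_KERNEL, my_proto);

	if (!sk)
		return -ENOBUFS;
	sock_init_data(sock, sk);	/* attach sk to sock, set defaults */
	return 0;
}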
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Eric Dumazet2b85a342009-06-11 02:55:43 -07001389static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390{
1391 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
1393 if (sk->sk_destruct)
1394 sk->sk_destruct(sk);
1395
Paul E. McKenneya898def2010-02-22 17:04:49 -08001396 filter = rcu_dereference_check(sk->sk_filter,
1397 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001399 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001400 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 }
1402
Eric Dumazet08e29af2011-11-28 12:04:18 +00001403 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404
1405 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001406 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1407 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001409 if (sk->sk_peer_cred)
1410 put_cred(sk->sk_peer_cred);
1411 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001412 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001413 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001415
1416void sk_free(struct sock *sk)
1417{
1418 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001419 * We subtract one from sk_wmem_alloc; if the result is not zero,
Eric Dumazet2b85a342009-06-11 02:55:43 -07001420 * some packets are still in some tx queue and
1421 * sock_wfree() will call __sk_free(sk) later.
1422 */
1423 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1424 __sk_free(sk);
1425}
Eric Dumazet2a915252009-05-27 11:30:05 +00001426EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
Denis V. Lunevedf02082008-02-29 11:18:32 -08001428/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001429 * The last sock_put should drop the reference to sk->sk_net. It has already
1430 * been dropped in sk_change_net. Taking a reference to the stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001431 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001432 * Take a reference to the socket to remove it from the hash tables _alive_,
Denis V. Lunevedf02082008-02-29 11:18:32 -08001433 * and after that destroy it in the context of init_net.
1434 */
1435void sk_release_kernel(struct sock *sk)
1436{
1437 if (sk == NULL || sk->sk_socket == NULL)
1438 return;
1439
1440 sock_hold(sk);
1441 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001442 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001443 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001444 sock_put(sk);
1445}
David S. Miller45af1752008-02-29 11:33:19 -08001446EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001447
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001448static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1449{
1450 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1451 sock_update_memcg(newsk);
1452}
1453
Eric Dumazete56c57d2011-11-08 17:07:07 -05001454/**
1455 * sk_clone_lock - clone a socket, and lock its clone
1456 * @sk: the socket to clone
1457 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1458 *
1459 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1460 */
1461struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001462{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001463 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001464 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001465
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001466 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001467 if (newsk != NULL) {
1468 struct sk_filter *filter;
1469
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001470 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001471
1472 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001473 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001474 sk_node_init(&newsk->sk_node);
1475 sock_lock_init(newsk);
1476 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001477 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001478 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001479
1480 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001481 /*
1482 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1483 */
1484 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001485 atomic_set(&newsk->sk_omem_alloc, 0);
1486 skb_queue_head_init(&newsk->sk_receive_queue);
1487 skb_queue_head_init(&newsk->sk_write_queue);
1488
Eric Dumazetb6c67122010-04-08 23:03:29 +00001489 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001490 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001491 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1492 af_callback_keys + newsk->sk_family,
1493 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001494
1495 newsk->sk_dst_cache = NULL;
1496 newsk->sk_wmem_queued = 0;
1497 newsk->sk_forward_alloc = 0;
1498 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001499 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1500
1501 sock_reset_flag(newsk, SOCK_DONE);
1502 skb_queue_head_init(&newsk->sk_error_queue);
1503
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001504 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001505 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001506 /* though it's an empty new sock, the charging may fail
1507 * if sysctl_optmem_max was changed between the creation of
1508 * the original socket and this clone
1509 */
1510 is_charged = sk_filter_charge(newsk, filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001511
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001512 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001513 /* It is still a raw copy of the parent, so invalidate
1514 * the destructor and do a plain sk_free() */
1515 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001516 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001517 sk_free(newsk);
1518 newsk = NULL;
1519 goto out;
1520 }
1521
1522 newsk->sk_err = 0;
1523 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001524 newsk->sk_incoming_cpu = raw_smp_processor_id();
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001525 /*
1526 * Before updating sk_refcnt, we must commit prior changes to memory
1527 * (Documentation/RCU/rculist_nulls.txt for details)
1528 */
1529 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001530 atomic_set(&newsk->sk_refcnt, 2);
1531
1532 /*
1533 * Increment the counter in the same struct proto as the master
1534 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1535 * is the same as sk->sk_prot->socks, as this field was copied
1536 * with memcpy).
1537 *
1538 * This _changes_ the previous behaviour, where
1539 * tcp_create_openreq_child always incremented the
1540 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1541 * to be taken into account in all callers. -acme
1542 */
1543 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001544 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001545 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001546
Glauber Costaf3f511e2012-01-05 20:16:39 +00001547 sk_update_clone(sk, newsk);
1548
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001549 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001550 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001551
Eric Dumazet08e29af2011-11-28 12:04:18 +00001552 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001553 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001554 }
1555out:
1556 return newsk;
1557}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001558EXPORT_SYMBOL_GPL(sk_clone_lock);
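/*
 * Illustrative sketch, not part of this file: sk_clone_lock() hands the
 * clone back with its bh lock held, so the caller must bh_unlock_sock()
 * it once protocol specific setup is done, even if it later discards the
 * clone. "example_clone" is a hypothetical caller.
 */
static struct sock *example_clone(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (newsk) {
		/* ... protocol specific initialisation of newsk ... */
		bh_unlock_sock(newsk);
	}
	return newsk;
}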
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001559
Andi Kleen99580892007-04-20 17:12:43 -07001560void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1561{
1562 __sk_dst_set(sk, dst);
1563 sk->sk_route_caps = dst->dev->features;
1564 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001565 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001566 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001567 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001568 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001569 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001570 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001571 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001572 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001573 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001574 }
Andi Kleen99580892007-04-20 17:12:43 -07001575 }
1576}
1577EXPORT_SYMBOL_GPL(sk_setup_caps);
1578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579/*
1580 * Simple resource managers for sockets.
1581 */
1582
1583
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001584/*
1585 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 */
1587void sock_wfree(struct sk_buff *skb)
1588{
1589 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001590 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
Eric Dumazetd99927f2009-09-24 10:49:24 +00001592 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1593 /*
1594 * Keep a reference on sk_wmem_alloc; it will be released
1595 * after the sk_write_space() call
1596 */
1597 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001599 len = 1;
1600 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001601 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001602 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1603 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001604 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001605 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001606 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607}
Eric Dumazet2a915252009-05-27 11:30:05 +00001608EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001610void skb_orphan_partial(struct sk_buff *skb)
1611{
1612 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1613 * so we do not completely orphan the skb, but transfer all
1614 * accounted bytes but one, to avoid unexpected reorders.
1615 */
1616 if (skb->destructor == sock_wfree
1617#ifdef CONFIG_INET
1618 || skb->destructor == tcp_wfree
1619#endif
1620 ) {
1621 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1622 skb->truesize = 1;
1623 } else {
1624 skb_orphan(skb);
1625 }
1626}
1627EXPORT_SYMBOL(skb_orphan_partial);
1628
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001629/*
1630 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 */
1632void sock_rfree(struct sk_buff *skb)
1633{
1634 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001635 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
Eric Dumazetd361fd52010-07-10 22:45:17 +00001637 atomic_sub(len, &sk->sk_rmem_alloc);
1638 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639}
Eric Dumazet2a915252009-05-27 11:30:05 +00001640EXPORT_SYMBOL(sock_rfree);
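/*
 * Illustrative sketch, not part of this file: receive paths charge an skb
 * to a socket with skb_set_owner_r(), which installs sock_rfree() as the
 * destructor so the rmem accounting above is undone when the skb is freed.
 * "example_queue_rcv" is a hypothetical helper.
 */
static void example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_r(skb, sk);	/* pairs with sock_rfree() */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
}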
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
Alexander Duyck62bccb82014-09-04 13:31:35 -04001642void sock_efree(struct sk_buff *skb)
1643{
1644 sock_put(skb->sk);
1645}
1646EXPORT_SYMBOL(sock_efree);
1647
Alexander Duyck82eabd92014-09-04 13:32:11 -04001648#ifdef CONFIG_INET
David S. Miller41063e92012-06-19 21:22:05 -07001649void sock_edemux(struct sk_buff *skb)
1650{
Eric Dumazete8123472012-09-02 23:57:18 +00001651 struct sock *sk = skb->sk;
1652
1653 if (sk->sk_state == TCP_TIME_WAIT)
1654 inet_twsk_put(inet_twsk(sk));
1655 else
1656 sock_put(sk);
David S. Miller41063e92012-06-19 21:22:05 -07001657}
1658EXPORT_SYMBOL(sock_edemux);
Alexander Duyck82eabd92014-09-04 13:32:11 -04001659#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
Eric W. Biederman976d02012012-05-23 17:16:53 -06001661kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001663 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664
Eric Dumazetf064af12010-09-22 12:43:39 +00001665 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001666 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001667 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 return uid;
1669}
Eric Dumazet2a915252009-05-27 11:30:05 +00001670EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
1672unsigned long sock_i_ino(struct sock *sk)
1673{
1674 unsigned long ino;
1675
Eric Dumazetf064af12010-09-22 12:43:39 +00001676 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001678 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 return ino;
1680}
Eric Dumazet2a915252009-05-27 11:30:05 +00001681EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
1683/*
1684 * Allocate a skb from the socket's send buffer.
1685 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001686struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001687 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688{
1689 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001690 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 if (skb) {
1692 skb_set_owner_w(skb, sk);
1693 return skb;
1694 }
1695 }
1696 return NULL;
1697}
Eric Dumazet2a915252009-05-27 11:30:05 +00001698EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
1700/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001702 */
Al Virodd0fc662005-10-07 07:46:04 +01001703void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704{
Eric Dumazet95c96172012-04-15 05:58:06 +00001705 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1707 void *mem;
1708 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001709 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 */
1711 atomic_add(size, &sk->sk_omem_alloc);
1712 mem = kmalloc(size, priority);
1713 if (mem)
1714 return mem;
1715 atomic_sub(size, &sk->sk_omem_alloc);
1716 }
1717 return NULL;
1718}
Eric Dumazet2a915252009-05-27 11:30:05 +00001719EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
1721/*
1722 * Free an option memory block.
1723 */
1724void sock_kfree_s(struct sock *sk, void *mem, int size)
1725{
David S. Millere53da5f2014-10-14 17:02:37 -04001726 if (WARN_ON_ONCE(!mem))
1727 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 kfree(mem);
1729 atomic_sub(size, &sk->sk_omem_alloc);
1730}
Eric Dumazet2a915252009-05-27 11:30:05 +00001731EXPORT_SYMBOL(sock_kfree_s);
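/*
 * Illustrative sketch, not part of this file: option memory must be freed
 * with the same size it was charged with. "struct example_opt" and
 * "example_set_opt" are hypothetical.
 */
struct example_opt { u32 flags; };

static int example_set_opt(struct sock *sk)
{
	struct example_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);

	if (!opt)
		return -ENOBUFS;
	opt->flags = 0;
	/* ... install opt under the socket lock ... */
	sock_kfree_s(sk, opt, sizeof(*opt));
	return 0;
}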
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732
1733/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1734 I think these locks should be removed for datagram sockets.
1735 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001736static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737{
1738 DEFINE_WAIT(wait);
1739
1740 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1741 for (;;) {
1742 if (!timeo)
1743 break;
1744 if (signal_pending(current))
1745 break;
1746 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001747 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1749 break;
1750 if (sk->sk_shutdown & SEND_SHUTDOWN)
1751 break;
1752 if (sk->sk_err)
1753 break;
1754 timeo = schedule_timeout(timeo);
1755 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001756 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 return timeo;
1758}
1759
1760
1761/*
1762 * Generic send/receive buffer handlers
1763 */
1764
Herbert Xu4cc7f682009-02-04 16:55:54 -08001765struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1766 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07001767 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001769 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 long timeo;
1771 int err;
1772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001774 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 err = sock_error(sk);
1776 if (err != 0)
1777 goto failure;
1778
1779 err = -EPIPE;
1780 if (sk->sk_shutdown & SEND_SHUTDOWN)
1781 goto failure;
1782
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001783 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1784 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07001785
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001786 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1787 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1788 err = -EAGAIN;
1789 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001791 if (signal_pending(current))
1792 goto interrupted;
1793 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001795 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1796 errcode, sk->sk_allocation);
1797 if (skb)
1798 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return skb;
1800
1801interrupted:
1802 err = sock_intr_errno(timeo);
1803failure:
1804 *errcode = err;
1805 return NULL;
1806}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001807EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001809struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 int noblock, int *errcode)
1811{
Eric Dumazet28d64272013-08-08 14:38:47 -07001812 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813}
Eric Dumazet2a915252009-05-27 11:30:05 +00001814EXPORT_SYMBOL(sock_alloc_send_skb);
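/*
 * Illustrative sketch, not part of this file: a datagram sendmsg()
 * implementation usually reserves link level headroom and lets
 * sock_alloc_send_skb() block for send buffer space. The 128 byte
 * headroom and "example_alloc_dgram" are hypothetical.
 */
static struct sk_buff *example_alloc_dgram(struct sock *sk, size_t len,
					   int noblock, int *err)
{
	struct sk_buff *skb = sock_alloc_send_skb(sk, len + 128, noblock, err);

	if (skb)
		skb_reserve(skb, 128);
	return skb;
}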
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
Eric Dumazet5640f762012-09-23 23:04:42 +00001816/* On 32bit arches, an skb frag is limited to 2^15 */
1817#define SKB_FRAG_PAGE_ORDER get_order(32768)
1818
Eric Dumazet400dfd32013-10-17 16:27:07 -07001819/**
1820 * skb_page_frag_refill - check that a page_frag contains enough room
1821 * @sz: minimum size of the fragment we want to get
1822 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07001823 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07001824 *
1825 * Note: While this allocator tries to use high order pages, there is
1826 * no guarantee that allocations succeed. Therefore, @sz MUST be
1827 * less or equal than PAGE_SIZE.
1828 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07001829bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00001830{
Eric Dumazet5640f762012-09-23 23:04:42 +00001831 if (pfrag->page) {
1832 if (atomic_read(&pfrag->page->_count) == 1) {
1833 pfrag->offset = 0;
1834 return true;
1835 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001836 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00001837 return true;
1838 put_page(pfrag->page);
1839 }
1840
Eric Dumazetd9b29382014-08-27 20:49:34 -07001841 pfrag->offset = 0;
1842 if (SKB_FRAG_PAGE_ORDER) {
1843 pfrag->page = alloc_pages(gfp | __GFP_COMP |
1844 __GFP_NOWARN | __GFP_NORETRY,
1845 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00001846 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07001847 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00001848 return true;
1849 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07001850 }
1851 pfrag->page = alloc_page(gfp);
1852 if (likely(pfrag->page)) {
1853 pfrag->size = PAGE_SIZE;
1854 return true;
1855 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001856 return false;
1857}
1858EXPORT_SYMBOL(skb_page_frag_refill);
1859
1860bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1861{
1862 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1863 return true;
1864
Eric Dumazet5640f762012-09-23 23:04:42 +00001865 sk_enter_memory_pressure(sk);
1866 sk_stream_moderate_sndbuf(sk);
1867 return false;
1868}
1869EXPORT_SYMBOL(sk_page_frag_refill);
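/*
 * Illustrative sketch, not part of this file: a sendmsg() fast path
 * refills the per-socket page fragment and copies user data into it.
 * "example_copy_to_frag" is hypothetical; real callers also deal with the
 * memory pressure fallback that sk_page_frag_refill() triggers.
 */
static int example_copy_to_frag(struct sock *sk, struct iov_iter *from, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;
	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	if (copy_page_from_iter(pfrag->page, pfrag->offset, copy, from) != copy)
		return -EFAULT;
	pfrag->offset += copy;
	return copy;
}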
1870
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001872 __releases(&sk->sk_lock.slock)
1873 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874{
1875 DEFINE_WAIT(wait);
1876
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001877 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1879 TASK_UNINTERRUPTIBLE);
1880 spin_unlock_bh(&sk->sk_lock.slock);
1881 schedule();
1882 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001883 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 break;
1885 }
1886 finish_wait(&sk->sk_lock.wq, &wait);
1887}
1888
1889static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001890 __releases(&sk->sk_lock.slock)
1891 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892{
1893 struct sk_buff *skb = sk->sk_backlog.head;
1894
1895 do {
1896 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1897 bh_unlock_sock(sk);
1898
1899 do {
1900 struct sk_buff *next = skb->next;
1901
Eric Dumazete4cbb022012-04-30 16:07:09 +00001902 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001903 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001905 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
1907 /*
1908 * We are in process context here with softirqs
1909 * disabled, use cond_resched_softirq() to preempt.
1910 * This is safe to do because we've taken the backlog
1911 * queue private:
1912 */
1913 cond_resched_softirq();
1914
1915 skb = next;
1916 } while (skb != NULL);
1917
1918 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001919 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001920
1921 /*
1922 * Doing the zeroing here guarantees we cannot loop forever
1923 * while a wild producer attempts to flood us.
1924 */
1925 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926}
1927
1928/**
1929 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001930 * @sk: sock to wait on
1931 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 *
1933 * Now the socket state, including sk->sk_err, is changed only under the lock,
1934 * hence we may omit checks after joining the wait queue.
1935 * We check the receive queue before schedule() only as an optimization;
1936 * it is very likely that release_sock() added new data.
1937 */
1938int sk_wait_data(struct sock *sk, long *timeo)
1939{
1940 int rc;
1941 DEFINE_WAIT(wait);
1942
Eric Dumazetaa395142010-04-20 13:03:51 +00001943 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1945 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1946 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001947 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 return rc;
1949}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950EXPORT_SYMBOL(sk_wait_data);
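/*
 * Illustrative sketch, not part of this file: a recvmsg() implementation
 * calls sk_wait_data() with the socket lock held and rechecks the receive
 * queue afterwards. "example_wait_for_skb" is a hypothetical helper.
 */
static struct sk_buff *example_wait_for_skb(struct sock *sk, long *timeo)
{
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (!*timeo || sk->sk_err)
			break;
		sk_wait_data(sk, timeo);
	}
	return skb;
}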
1951
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001952/**
1953 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1954 * @sk: socket
1955 * @size: memory size to allocate
1956 * @kind: allocation type
1957 *
1958 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1959 * rmem allocation. This function assumes that protocols which have
1960 * memory_pressure use sk_wmem_queued as write buffer accounting.
1961 */
1962int __sk_mem_schedule(struct sock *sk, int size, int kind)
1963{
1964 struct proto *prot = sk->sk_prot;
1965 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001966 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001967 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001968
1969 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001970
Glauber Costae1aab162011-12-11 21:47:03 +00001971 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001972
1973 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001974 if (parent_status == UNDER_LIMIT &&
1975 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001976 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001977 return 1;
1978 }
1979
Glauber Costae1aab162011-12-11 21:47:03 +00001980 /* Under pressure. (we or our parents) */
1981 if ((parent_status > SOFT_LIMIT) ||
1982 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001983 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001984
Glauber Costae1aab162011-12-11 21:47:03 +00001985 /* Over hard limit (we or our parents) */
1986 if ((parent_status == OVER_LIMIT) ||
1987 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001988 goto suppress_allocation;
1989
1990 /* guarantee minimum buffer size under pressure */
1991 if (kind == SK_MEM_RECV) {
1992 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1993 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001994
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001995 } else { /* SK_MEM_SEND */
1996 if (sk->sk_type == SOCK_STREAM) {
1997 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1998 return 1;
1999 } else if (atomic_read(&sk->sk_wmem_alloc) <
2000 prot->sysctl_wmem[0])
2001 return 1;
2002 }
2003
Glauber Costa180d8cd2011-12-11 21:47:02 +00002004 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08002005 int alloc;
2006
Glauber Costa180d8cd2011-12-11 21:47:02 +00002007 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002008 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002009 alloc = sk_sockets_allocated_read_positive(sk);
2010 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002011 sk_mem_pages(sk->sk_wmem_queued +
2012 atomic_read(&sk->sk_rmem_alloc) +
2013 sk->sk_forward_alloc))
2014 return 1;
2015 }
2016
2017suppress_allocation:
2018
2019 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2020 sk_stream_moderate_sndbuf(sk);
2021
2022 /* Fail only if socket is _under_ its sndbuf.
2023 * In this case we cannot block, so we have to fail.
2024 */
2025 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2026 return 1;
2027 }
2028
Satoru Moriya3847ce32011-06-17 12:00:03 +00002029 trace_sock_exceed_buf_limit(sk, prot, allocated);
2030
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002031 /* Alas. Undo changes. */
2032 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002033
Glauber Costa0e90b312012-01-20 04:57:16 +00002034 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002035
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002036 return 0;
2037}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002038EXPORT_SYMBOL(__sk_mem_schedule);
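/*
 * Illustrative sketch, not part of this file: protocols normally use the
 * sk_rmem_schedule()/sk_wmem_schedule() wrappers, which consume
 * sk_forward_alloc first and only fall back to __sk_mem_schedule().
 * "example_can_queue_rcv" is a hypothetical helper.
 */
static bool example_can_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk_rmem_schedule(sk, skb, skb->truesize);
}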
2039
2040/**
2041 * __sk_mem_reclaim - reclaim memory_allocated
2042 * @sk: socket
2043 */
2044void __sk_mem_reclaim(struct sock *sk)
2045{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002046 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00002047 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002048 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2049
Glauber Costa180d8cd2011-12-11 21:47:02 +00002050 if (sk_under_memory_pressure(sk) &&
2051 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2052 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002053}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002054EXPORT_SYMBOL(__sk_mem_reclaim);
2055
2056
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057/*
2058 * Set of default routines for initialising struct proto_ops when
2059 * the protocol does not support a particular function. In certain
2060 * cases where it makes no sense for a protocol to have a "do nothing"
2061 * function, some default processing is provided.
2062 */
2063
2064int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2065{
2066 return -EOPNOTSUPP;
2067}
Eric Dumazet2a915252009-05-27 11:30:05 +00002068EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002070int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 int len, int flags)
2072{
2073 return -EOPNOTSUPP;
2074}
Eric Dumazet2a915252009-05-27 11:30:05 +00002075EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
2077int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2078{
2079 return -EOPNOTSUPP;
2080}
Eric Dumazet2a915252009-05-27 11:30:05 +00002081EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
2083int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2084{
2085 return -EOPNOTSUPP;
2086}
Eric Dumazet2a915252009-05-27 11:30:05 +00002087EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002089int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 int *len, int peer)
2091{
2092 return -EOPNOTSUPP;
2093}
Eric Dumazet2a915252009-05-27 11:30:05 +00002094EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
Eric Dumazet2a915252009-05-27 11:30:05 +00002096unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097{
2098 return 0;
2099}
Eric Dumazet2a915252009-05-27 11:30:05 +00002100EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
2102int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2103{
2104 return -EOPNOTSUPP;
2105}
Eric Dumazet2a915252009-05-27 11:30:05 +00002106EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107
2108int sock_no_listen(struct socket *sock, int backlog)
2109{
2110 return -EOPNOTSUPP;
2111}
Eric Dumazet2a915252009-05-27 11:30:05 +00002112EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114int sock_no_shutdown(struct socket *sock, int how)
2115{
2116 return -EOPNOTSUPP;
2117}
Eric Dumazet2a915252009-05-27 11:30:05 +00002118EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
2120int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002121 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122{
2123 return -EOPNOTSUPP;
2124}
Eric Dumazet2a915252009-05-27 11:30:05 +00002125EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
2127int sock_no_getsockopt(struct socket *sock, int level, int optname,
2128 char __user *optval, int __user *optlen)
2129{
2130 return -EOPNOTSUPP;
2131}
Eric Dumazet2a915252009-05-27 11:30:05 +00002132EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133
2134int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2135 size_t len)
2136{
2137 return -EOPNOTSUPP;
2138}
Eric Dumazet2a915252009-05-27 11:30:05 +00002139EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2142 size_t len, int flags)
2143{
2144 return -EOPNOTSUPP;
2145}
Eric Dumazet2a915252009-05-27 11:30:05 +00002146EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
2148int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2149{
2150 /* Mirror missing mmap method error code */
2151 return -ENODEV;
2152}
Eric Dumazet2a915252009-05-27 11:30:05 +00002153EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
2155ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2156{
2157 ssize_t res;
2158 struct msghdr msg = {.msg_flags = flags};
2159 struct kvec iov;
2160 char *kaddr = kmap(page);
2161 iov.iov_base = kaddr + offset;
2162 iov.iov_len = size;
2163 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2164 kunmap(page);
2165 return res;
2166}
Eric Dumazet2a915252009-05-27 11:30:05 +00002167EXPORT_SYMBOL(sock_no_sendpage);
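/*
 * Illustrative sketch, not part of this file: a minimal proto_ops can be
 * assembled almost entirely from the stubs above, overriding only the
 * operations the protocol actually supports. "example_stub_ops" is
 * hypothetical; a real table also needs a ->release implementation.
 */
static const struct proto_ops example_stub_ops = {
	.family		= PF_UNSPEC,
	.owner		= THIS_MODULE,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= sock_no_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= sock_no_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};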
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
2169/*
2170 * Default Socket Callbacks
2171 */
2172
2173static void sock_def_wakeup(struct sock *sk)
2174{
Eric Dumazet43815482010-04-29 11:01:49 +00002175 struct socket_wq *wq;
2176
2177 rcu_read_lock();
2178 wq = rcu_dereference(sk->sk_wq);
2179 if (wq_has_sleeper(wq))
2180 wake_up_interruptible_all(&wq->wait);
2181 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182}
2183
2184static void sock_def_error_report(struct sock *sk)
2185{
Eric Dumazet43815482010-04-29 11:01:49 +00002186 struct socket_wq *wq;
2187
2188 rcu_read_lock();
2189 wq = rcu_dereference(sk->sk_wq);
2190 if (wq_has_sleeper(wq))
2191 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002192 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002193 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194}
2195
David S. Miller676d2362014-04-11 16:15:36 -04002196static void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197{
Eric Dumazet43815482010-04-29 11:01:49 +00002198 struct socket_wq *wq;
2199
2200 rcu_read_lock();
2201 wq = rcu_dereference(sk->sk_wq);
2202 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002203 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002204 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002205 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002206 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207}
2208
2209static void sock_def_write_space(struct sock *sk)
2210{
Eric Dumazet43815482010-04-29 11:01:49 +00002211 struct socket_wq *wq;
2212
2213 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
2215 /* Do not wake up a writer until he can make "significant"
2216 * progress. --DaveM
2217 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002218 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002219 wq = rcu_dereference(sk->sk_wq);
2220 if (wq_has_sleeper(wq))
2221 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002222 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
2224 /* Should agree with poll, otherwise some programs break */
2225 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002226 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 }
2228
Eric Dumazet43815482010-04-29 11:01:49 +00002229 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230}
2231
2232static void sock_def_destruct(struct sock *sk)
2233{
Jesper Juhla51482b2005-11-08 09:41:34 -08002234 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235}
2236
2237void sk_send_sigurg(struct sock *sk)
2238{
2239 if (sk->sk_socket && sk->sk_socket->file)
2240 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002241 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242}
Eric Dumazet2a915252009-05-27 11:30:05 +00002243EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244
2245void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2246 unsigned long expires)
2247{
2248 if (!mod_timer(timer, expires))
2249 sock_hold(sk);
2250}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251EXPORT_SYMBOL(sk_reset_timer);
2252
2253void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2254{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002255 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 __sock_put(sk);
2257}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258EXPORT_SYMBOL(sk_stop_timer);
2259
2260void sock_init_data(struct socket *sock, struct sock *sk)
2261{
2262 skb_queue_head_init(&sk->sk_receive_queue);
2263 skb_queue_head_init(&sk->sk_write_queue);
2264 skb_queue_head_init(&sk->sk_error_queue);
2265
2266 sk->sk_send_head = NULL;
2267
2268 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002269
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 sk->sk_allocation = GFP_KERNEL;
2271 sk->sk_rcvbuf = sysctl_rmem_default;
2272 sk->sk_sndbuf = sysctl_wmem_default;
2273 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002274 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
2276 sock_set_flag(sk, SOCK_ZAPPED);
2277
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002278 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002280 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 sock->sk = sk;
2282 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002283 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284
Eric Dumazetb6c67122010-04-08 23:03:29 +00002285 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002287 lockdep_set_class_and_name(&sk->sk_callback_lock,
2288 af_callback_keys + sk->sk_family,
2289 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
2291 sk->sk_state_change = sock_def_wakeup;
2292 sk->sk_data_ready = sock_def_readable;
2293 sk->sk_write_space = sock_def_write_space;
2294 sk->sk_error_report = sock_def_error_report;
2295 sk->sk_destruct = sock_def_destruct;
2296
Eric Dumazet5640f762012-09-23 23:04:42 +00002297 sk->sk_frag.page = NULL;
2298 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002299 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002301 sk->sk_peer_pid = NULL;
2302 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 sk->sk_write_pending = 0;
2304 sk->sk_rcvlowat = 1;
2305 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2306 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2307
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002308 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309
Cong Wange0d10952013-08-01 11:10:25 +08002310#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03002311 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002312 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002313#endif
2314
Eric Dumazet62748f32013-09-24 08:20:52 -07002315 sk->sk_max_pacing_rate = ~0U;
Eric Dumazet7eec4172013-10-08 15:16:00 -07002316 sk->sk_pacing_rate = ~0U;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002317 /*
2318 * Before updating sk_refcnt, we must commit prior changes to memory
2319 * (Documentation/RCU/rculist_nulls.txt for details)
2320 */
2321 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002323 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324}
Eric Dumazet2a915252009-05-27 11:30:05 +00002325EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002327void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328{
2329 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002330 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002331 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002333 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002334 spin_unlock(&sk->sk_lock.slock);
2335 /*
2336 * The sk_lock has mutex_lock() semantics here:
2337 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002338 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002339 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002341EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002343void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002345 /*
2346 * The sk_lock has mutex_unlock() semantics:
2347 */
2348 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2349
2350 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 if (sk->sk_backlog.tail)
2352 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002353
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002354 /* Warning : release_cb() might need to release sk ownership,
2355 * i.e. call sock_release_ownership(sk) before us.
2356 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002357 if (sk->sk_prot->release_cb)
2358 sk->sk_prot->release_cb(sk);
2359
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002360 sock_release_ownership(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002361 if (waitqueue_active(&sk->sk_lock.wq))
2362 wake_up(&sk->sk_lock.wq);
2363 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364}
2365EXPORT_SYMBOL(release_sock);
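/*
 * Illustrative sketch, not part of this file: the canonical pattern for
 * process context code that touches socket state. "example_set_sndbuf"
 * and the raw sk_sndbuf update are hypothetical.
 */
static void example_set_sndbuf(struct sock *sk, int val)
{
	lock_sock(sk);
	sk->sk_sndbuf = val;
	release_sock(sk);
}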
2366
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002367/**
2368 * lock_sock_fast - fast version of lock_sock
2369 * @sk: socket
2370 *
2371 * This version should be used for very small sections, where the process won't block:
2372 * return false if the fast path is taken,
2373 *   sk_lock.slock locked, owned = 0, BH disabled
2374 * return true if the slow path is taken,
2375 *   sk_lock.slock unlocked, owned = 1, BH enabled
2376 */
2377bool lock_sock_fast(struct sock *sk)
2378{
2379 might_sleep();
2380 spin_lock_bh(&sk->sk_lock.slock);
2381
2382 if (!sk->sk_lock.owned)
2383 /*
2384 * Note : We must disable BH
2385 */
2386 return false;
2387
2388 __lock_sock(sk);
2389 sk->sk_lock.owned = 1;
2390 spin_unlock(&sk->sk_lock.slock);
2391 /*
2392 * The sk_lock has mutex_lock() semantics here:
2393 */
2394 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2395 local_bh_enable();
2396 return true;
2397}
2398EXPORT_SYMBOL(lock_sock_fast);
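/*
 * Illustrative sketch, not part of this file: lock_sock_fast() is always
 * paired with unlock_sock_fast(), forwarding the return value so the
 * right unlock path (spinlock vs. full release_sock()) is taken.
 * "example_read_state" is a hypothetical helper.
 */
static int example_read_state(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	int state = sk->sk_state;

	unlock_sock_fast(sk, slow);
	return state;
}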
2399
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002401{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002402 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002404 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002405 tv = ktime_to_timeval(sk->sk_stamp);
2406 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002408 if (tv.tv_sec == 0) {
2409 sk->sk_stamp = ktime_get_real();
2410 tv = ktime_to_timeval(sk->sk_stamp);
2411 }
2412 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002413}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414EXPORT_SYMBOL(sock_get_timestamp);
2415
Eric Dumazetae40eb12007-03-18 17:33:16 -07002416int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2417{
2418 struct timespec ts;
2419 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002420 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002421 ts = ktime_to_timespec(sk->sk_stamp);
2422 if (ts.tv_sec == -1)
2423 return -ENOENT;
2424 if (ts.tv_sec == 0) {
2425 sk->sk_stamp = ktime_get_real();
2426 ts = ktime_to_timespec(sk->sk_stamp);
2427 }
2428 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2429}
2430EXPORT_SYMBOL(sock_get_timestampns);
2431
Patrick Ohly20d49472009-02-12 05:03:38 +00002432void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002433{
Patrick Ohly20d49472009-02-12 05:03:38 +00002434 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002435 unsigned long previous_flags = sk->sk_flags;
2436
Patrick Ohly20d49472009-02-12 05:03:38 +00002437 sock_set_flag(sk, flag);
2438 /*
2439 * we just set one of the two flags which require net
2440 * time stamping, but time stamping might have been on
2441 * already because of the other one
2442 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002443 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002444 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 }
2446}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
Richard Cochrancb820f82013-07-19 19:40:09 +02002448int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2449 int level, int type)
2450{
2451 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002452 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02002453 int copied, err;
2454
2455 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002456 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02002457 if (skb == NULL)
2458 goto out;
2459
2460 copied = skb->len;
2461 if (copied > len) {
2462 msg->msg_flags |= MSG_TRUNC;
2463 copied = len;
2464 }
David S. Miller51f3d022014-11-05 16:46:40 -05002465 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02002466 if (err)
2467 goto out_free_skb;
2468
2469 sock_recv_timestamp(msg, sk, skb);
2470
2471 serr = SKB_EXT_ERR(skb);
2472 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2473
2474 msg->msg_flags |= MSG_ERRQUEUE;
2475 err = copied;
2476
Richard Cochrancb820f82013-07-19 19:40:09 +02002477out_free_skb:
2478 kfree_skb(skb);
2479out:
2480 return err;
2481}
2482EXPORT_SYMBOL(sock_recv_errqueue);
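/*
 * Illustrative sketch, not part of this file: a protocol recvmsg() that
 * honours MSG_ERRQUEUE can forward straight to sock_recv_errqueue(); the
 * SOL_IP/IP_RECVERR values are only an example of what a hypothetical
 * IPv4 caller might put in the control message.
 */
static int example_recv_err(struct sock *sk, struct msghdr *msg, int len,
			    int flags)
{
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
	return -EAGAIN;	/* normal receive path omitted */
}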
2483
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484/*
2485 * Get a socket option on a socket.
2486 *
2487 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2488 * asynchronous errors should be reported by getsockopt. We assume
2489 * this means if you specify SO_ERROR (otherwise what's the point of it).
2490 */
2491int sock_common_getsockopt(struct socket *sock, int level, int optname,
2492 char __user *optval, int __user *optlen)
2493{
2494 struct sock *sk = sock->sk;
2495
2496 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2497}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498EXPORT_SYMBOL(sock_common_getsockopt);
2499
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002500#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002501int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2502 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002503{
2504 struct sock *sk = sock->sk;
2505
Johannes Berg1e51f952007-03-06 13:44:06 -08002506 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002507 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2508 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002509 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2510}
2511EXPORT_SYMBOL(compat_sock_common_getsockopt);
2512#endif
2513
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2515 struct msghdr *msg, size_t size, int flags)
2516{
2517 struct sock *sk = sock->sk;
2518 int addr_len = 0;
2519 int err;
2520
2521 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2522 flags & ~MSG_DONTWAIT, &addr_len);
2523 if (err >= 0)
2524 msg->msg_namelen = addr_len;
2525 return err;
2526}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527EXPORT_SYMBOL(sock_common_recvmsg);
2528
2529/*
2530 * Set socket options on an inet socket.
2531 */
2532int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002533 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534{
2535 struct sock *sk = sock->sk;
2536
2537 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2538}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539EXPORT_SYMBOL(sock_common_setsockopt);
2540
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002541#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002542int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002543 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002544{
2545 struct sock *sk = sock->sk;
2546
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002547 if (sk->sk_prot->compat_setsockopt != NULL)
2548 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2549 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002550 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2551}
2552EXPORT_SYMBOL(compat_sock_common_setsockopt);
2553#endif
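/*
 * Example (illustrative sketch, not part of this file): a protocol with no
 * special socket-level handling can point its proto_ops directly at the
 * generic helpers above, which just forward to the sk_prot callbacks.  The
 * "my_proto_ops" name is hypothetical and only the relevant fields are
 * shown; a real proto_ops must fill in every operation.
 */
static const struct proto_ops my_proto_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.recvmsg	   = sock_common_recvmsg,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
	/* .release, .bind, .connect, .sendmsg, ... are protocol specific */
};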
2554
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555void sk_common_release(struct sock *sk)
2556{
2557 if (sk->sk_prot->destroy)
2558 sk->sk_prot->destroy(sk);
2559
2560 /*
 2561 * Observation: when sk_common_release() is called, processes no longer
 2562 * have access to the socket, but the network stack still does.
2563 * Step one, detach it from networking:
2564 *
2565 * A. Remove from hash tables.
2566 */
2567
2568 sk->sk_prot->unhash(sk);
2569
2570 /*
 2571 * At this point the socket cannot receive new packets, but it is possible
 2572 * that some packets are still in flight, because some CPU ran the receive
 2573 * path and did its hash table lookup before we unhashed the socket. Those
 2574 * packets will reach the receive queue and be purged by the socket destructor.
 2575 *
 2576 * Also, we still have packets pending on the receive queue and, probably,
 2577 * our own packets waiting in device queues. sock_destroy will drain the
 2578 * receive queue, but transmitted packets will delay socket destruction
 2579 * until the last reference is released.
2580 */
2581
2582 sock_orphan(sk);
2583
2584 xfrm_sk_free_policy(sk);
2585
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002586 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00002587
2588 if (sk->sk_frag.page) {
2589 put_page(sk->sk_frag.page);
2590 sk->sk_frag.page = NULL;
2591 }
2592
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 sock_put(sk);
2594}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595EXPORT_SYMBOL(sk_common_release);
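/*
 * Example (illustrative sketch, not part of this file): a connectionless
 * protocol with no teardown handshake can implement its struct proto
 * .close callback as a thin wrapper around sk_common_release(); the
 * "my_close" name is hypothetical.
 */
static void my_close(struct sock *sk, long timeout)
{
	/* destroy -> unhash -> orphan -> final sock_put(), as described above */
	sk_common_release(sk);
}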
2596
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002597#ifdef CONFIG_PROC_FS
2598#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002599struct prot_inuse {
2600 int val[PROTO_INUSE_NR];
2601};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002602
2603static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002604
2605#ifdef CONFIG_NET_NS
2606void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2607{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002608 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002609}
2610EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2611
2612int sock_prot_inuse_get(struct net *net, struct proto *prot)
2613{
2614 int cpu, idx = prot->inuse_idx;
2615 int res = 0;
2616
2617 for_each_possible_cpu(cpu)
2618 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2619
2620 return res >= 0 ? res : 0;
2621}
2622EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2623
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002624static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002625{
2626 net->core.inuse = alloc_percpu(struct prot_inuse);
2627 return net->core.inuse ? 0 : -ENOMEM;
2628}
2629
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002630static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002631{
2632 free_percpu(net->core.inuse);
2633}
2634
2635static struct pernet_operations net_inuse_ops = {
2636 .init = sock_inuse_init_net,
2637 .exit = sock_inuse_exit_net,
2638};
2639
2640static __init int net_inuse_init(void)
2641{
2642 if (register_pernet_subsys(&net_inuse_ops))
2643 panic("Cannot initialize net inuse counters");
2644
2645 return 0;
2646}
2647
2648core_initcall(net_inuse_init);
2649#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002650static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2651
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002652void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002653{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002654 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002655}
2656EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2657
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002658int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002659{
2660 int cpu, idx = prot->inuse_idx;
2661 int res = 0;
2662
2663 for_each_possible_cpu(cpu)
2664 res += per_cpu(prot_inuse, cpu).val[idx];
2665
2666 return res >= 0 ? res : 0;
2667}
2668EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002669#endif
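/*
 * Example (illustrative sketch, not part of this file): protocols keep the
 * "sockets" column of /proc/net/protocols accurate by adjusting the per-cpu
 * counter from their hash/unhash callbacks.  The "my_hash"/"my_unhash"
 * names are hypothetical; the lookup-table manipulation itself is elided.
 */
static void my_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup tables ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void my_unhash(struct sock *sk)
{
	/* ... remove sk from the protocol's lookup tables ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}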
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002670
2671static void assign_proto_idx(struct proto *prot)
2672{
2673 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2674
2675 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002676 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002677 return;
2678 }
2679
2680 set_bit(prot->inuse_idx, proto_inuse_idx);
2681}
2682
2683static void release_proto_idx(struct proto *prot)
2684{
2685 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2686 clear_bit(prot->inuse_idx, proto_inuse_idx);
2687}
2688#else
2689static inline void assign_proto_idx(struct proto *prot)
2690{
2691}
2692
2693static inline void release_proto_idx(struct proto *prot)
2694{
2695}
2696#endif
2697
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698int proto_register(struct proto *prot, int alloc_slab)
2699{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 if (alloc_slab) {
2701 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002702 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2703 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704
2705 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002706 pr_crit("%s: Can't create sock SLAB cache!\n",
2707 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002708 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002710
2711 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002712 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002713 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002714 goto out_free_sock_slab;
2715
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002716 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002717 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002718 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002719
2720 if (prot->rsk_prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002721 pr_crit("%s: Can't create request sock SLAB cache!\n",
2722 prot->name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002723 goto out_free_request_sock_slab_name;
2724 }
2725 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002726
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002727 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002728 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002729
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002730 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002731 goto out_free_request_sock_slab;
2732
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002733 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002734 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002735 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002736 0,
2737 SLAB_HWCACHE_ALIGN |
2738 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002739 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002740 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002741 goto out_free_timewait_sock_slab_name;
2742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 }
2744
Glauber Costa36b77a52011-12-16 00:51:59 +00002745 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002747 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002748 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002749 return 0;
2750
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002751out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002752 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002753out_free_request_sock_slab:
2754 if (prot->rsk_prot && prot->rsk_prot->slab) {
2755 kmem_cache_destroy(prot->rsk_prot->slab);
2756 prot->rsk_prot->slab = NULL;
2757 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002758out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002759 if (prot->rsk_prot)
2760 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002761out_free_sock_slab:
2762 kmem_cache_destroy(prot->slab);
2763 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002764out:
2765 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767EXPORT_SYMBOL(proto_register);
2768
2769void proto_unregister(struct proto *prot)
2770{
Glauber Costa36b77a52011-12-16 00:51:59 +00002771 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002772 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002773 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002774 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775
2776 if (prot->slab != NULL) {
2777 kmem_cache_destroy(prot->slab);
2778 prot->slab = NULL;
2779 }
2780
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002781 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002782 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002783 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002784 prot->rsk_prot->slab = NULL;
2785 }
2786
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002787 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002788 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002789 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002790 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002791 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793EXPORT_SYMBOL(proto_unregister);
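/*
 * Example (illustrative sketch, not part of this file): the usual
 * module-init/exit pairing around proto_register()/proto_unregister().
 * "my_proto" and its callbacks are hypothetical; passing alloc_slab=1
 * asks for a dedicated kmem cache sized by .obj_size.
 */
static struct proto my_proto = {
	.name		= "MYPROTO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* normally sizeof(struct my_sock) */
	.close		= my_close,
	.hash		= my_hash,
	.unhash		= my_unhash,
};

static int __init my_proto_init(void)
{
	return proto_register(&my_proto, 1);
}

static void __exit my_proto_exit(void)
{
	proto_unregister(&my_proto);
}

module_init(my_proto_init);
module_exit(my_proto_exit);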
2794
2795#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002797 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798{
Glauber Costa36b77a52011-12-16 00:51:59 +00002799 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002800 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801}
2802
2803static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2804{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002805 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806}
2807
2808static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002809 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810{
Glauber Costa36b77a52011-12-16 00:51:59 +00002811 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812}
2813
2814static char proto_method_implemented(const void *method)
2815{
2816 return method == NULL ? 'n' : 'y';
2817}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002818static long sock_prot_memory_allocated(struct proto *proto)
2819{
Jeffrin Josecb75a362012-04-25 19:17:29 +05302820 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002821}
2822
2823static char *sock_prot_memory_pressure(struct proto *proto)
2824{
2825 return proto->memory_pressure != NULL ?
2826 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2827}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828
2829static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2830{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002831
Eric Dumazet8d987e52010-11-09 23:24:26 +00002832 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2834 proto->name,
2835 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002836 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00002837 sock_prot_memory_allocated(proto),
2838 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 proto->max_header,
2840 proto->slab == NULL ? "no" : "yes",
2841 module_name(proto->owner),
2842 proto_method_implemented(proto->close),
2843 proto_method_implemented(proto->connect),
2844 proto_method_implemented(proto->disconnect),
2845 proto_method_implemented(proto->accept),
2846 proto_method_implemented(proto->ioctl),
2847 proto_method_implemented(proto->init),
2848 proto_method_implemented(proto->destroy),
2849 proto_method_implemented(proto->shutdown),
2850 proto_method_implemented(proto->setsockopt),
2851 proto_method_implemented(proto->getsockopt),
2852 proto_method_implemented(proto->sendmsg),
2853 proto_method_implemented(proto->recvmsg),
2854 proto_method_implemented(proto->sendpage),
2855 proto_method_implemented(proto->bind),
2856 proto_method_implemented(proto->backlog_rcv),
2857 proto_method_implemented(proto->hash),
2858 proto_method_implemented(proto->unhash),
2859 proto_method_implemented(proto->get_port),
2860 proto_method_implemented(proto->enter_memory_pressure));
2861}
2862
2863static int proto_seq_show(struct seq_file *seq, void *v)
2864{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002865 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2867 "protocol",
2868 "size",
2869 "sockets",
2870 "memory",
2871 "press",
2872 "maxhdr",
2873 "slab",
2874 "module",
2875 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2876 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002877 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 return 0;
2879}
2880
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002881static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 .start = proto_seq_start,
2883 .next = proto_seq_next,
2884 .stop = proto_seq_stop,
2885 .show = proto_seq_show,
2886};
2887
2888static int proto_seq_open(struct inode *inode, struct file *file)
2889{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002890 return seq_open_net(inode, file, &proto_seq_ops,
2891 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892}
2893
Arjan van de Ven9a321442007-02-12 00:55:35 -08002894static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 .owner = THIS_MODULE,
2896 .open = proto_seq_open,
2897 .read = seq_read,
2898 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002899 .release = seq_release_net,
2900};
2901
2902static __net_init int proto_init_net(struct net *net)
2903{
Gao fengd4beaa62013-02-18 01:34:54 +00002904 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
Eric Dumazet14e943d2008-11-19 15:14:01 -08002905 return -ENOMEM;
2906
2907 return 0;
2908}
2909
2910static __net_exit void proto_exit_net(struct net *net)
2911{
Gao fengece31ff2013-02-18 01:34:56 +00002912 remove_proc_entry("protocols", net->proc_net);
Eric Dumazet14e943d2008-11-19 15:14:01 -08002913}
2914
2915
2916static __net_initdata struct pernet_operations proto_net_ops = {
2917 .init = proto_init_net,
2918 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919};
2920
2921static int __init proto_init(void)
2922{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002923 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924}
2925
2926subsys_initcall(proto_init);
2927
2928#endif /* PROC_FS */
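/*
 * Example (illustrative only, user-space code separate from this file):
 * the table built above is consumed by simply reading /proc/net/protocols;
 * after the header row, each line corresponds to one proto_seq_printf()
 * call for a registered struct proto.
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/protocols", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* header first, then one row per proto */
	fclose(f);
	return 0;
}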