/*
 * NETLINK      Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				 - inc module use count of module that owns
 * 				   the kernel socket in case userspace opens
 * 				   socket of same protocol
 * 				 - remove all module support, since netlink is
 * 				   mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
 * combined with an RCU read-side lock. Insertion and removal are protected
 * with nl_sk_hash_lock while using RCU list modification primitives and may
 * run in parallel to nl_table_lock protected lookups. Destruction of the
 * Netlink socket may only occur *after* nl_table_lock has been acquired
 * either during or after the socket has been removed from the list.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

/* Protects netlink socket hash table mutations */
DEFINE_MUTEX(nl_sk_hash_lock);
EXPORT_SYMBOL_GPL(nl_sk_hash_lock);

static int lockdep_nl_sk_hash_is_held(void)
{
#ifdef CONFIG_LOCKDEP
	if (debug_locks)
		return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
#endif
	return 1;
}

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

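/* Editor's worked example (not part of the original source): group numbers
 * are 1-based while the mask is a plain bitfield, so group 1 maps to bit 0
 * (mask 0x1) and group 5 to bit 4 (mask 0x10); group 0 means "unspecified"
 * and yields an empty mask.
 */
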
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb_end_offset(skb);
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	memcpy(skb_put(new, len), skb->data, len);
	return new;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	if (nt->module)
		__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found && nt->module)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

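/* Editor's usage sketch (hypothetical, not from this file): a monitoring
 * driver such as nlmon registers a tap bound to an ARPHRD_NETLINK
 * net_device it owns; every netlink skb passing the filter below is then
 * mirrored to that device via dev_queue_xmit():
 *
 *	static struct netlink_tap my_tap = {
 *		.dev	= my_nlmon_dev,	<-- assumed pre-registered device
 *		.module	= THIS_MODULE,
 *	};
 *
 *	err = netlink_add_tap(&my_tap);
 *	...
 *	netlink_remove_tap(&my_tap);
 */
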
static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);

	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

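/* Editor's note (not part of the original source): the condition above
 * means purely kernel-internal traffic (both endpoints are kernel sockets)
 * is the only case that bypasses the taps; kernel-to-user, user-to-kernel
 * and user-to-user messages are all mirrored.
 */
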
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

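/* Editor's note (not part of the original source): the NETLINK_CONGESTED
 * bit ties these two helpers together -- netlink_overrun() sets it on the
 * first receive-queue overflow and reports ENOBUFS once (unless the socket
 * opted out via NETLINK_RECV_NO_ENOBUFS), and netlink_rcv_wake() clears it
 * only after the receive queue has fully drained, at which point blocked
 * senders are woken.
 */
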
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

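/* Editor's note (not part of the original source): allocation above is
 * tried three ways, cheapest first: physically contiguous pages without
 * retry, then virtually contiguous memory via vzalloc(), then contiguous
 * pages again with __GFP_NORETRY dropped so the allocator may reclaim.
 * With 4 KiB pages, order 2 means one block of 1 << 2 = 4 pages = 16 KiB.
 */
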
static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

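/* Editor's worked example (not part of the original source): with 16
 * frames per block, frame position 37 resolves to block 37 / 16 = 2 at
 * frame offset 37 % 16 = 5, i.e. byte offset 5 * frame_size within
 * pg_vec[2].
 */
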
static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

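/* Editor's worked example (not part of the original source): a dump may
 * only continue while roughly half the rx ring is free. For a 16-frame
 * ring (frame_max = 15) with head = 10, the probe position is
 * 10 + 15 / 2 = 17, wrapped to 17 - 15 = 2; the dump proceeds only if
 * both frame 10 and frame 2 are still NL_MMAP_STATUS_UNUSED.
 */
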
static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct sock_iocb *siocb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = siocb->scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

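/* Editor's note (not part of the original source): readers hold
 * nl_table_lock only long enough to bump nl_table_users, so lookups can
 * run concurrently:
 *
 *	netlink_lock_table();
 *	... walk nl_table ...
 *	netlink_unlock_table();
 *
 * netlink_table_grab() takes the write lock and sleeps until
 * nl_table_users drops to zero, giving table writers exclusive access.
 */
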
struct netlink_compare_arg
{
	struct net *net;
	u32 portid;
};

static bool netlink_compare(void *ptr, void *arg)
{
	struct netlink_compare_arg *x = arg;
	struct sock *sk = ptr;

	return nlk_sk(sk)->portid == x->portid &&
	       net_eq(sock_net(sk), x->net);
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg = {
		.net = net,
		.portid = portid,
	};

	return rhashtable_lookup_compare(&table->hash, &portid,
					 &netlink_compare, &arg);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	read_lock(&nl_table_lock);
	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();
	read_unlock(&nl_table_lock);

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

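/* Editor's worked example (not part of the original source): if three
 * sockets bound to one protocol have groups[0] masks 0x1, 0x4 and 0x5,
 * the recomputed listeners->masks[0] is 0x1 | 0x4 | 0x5 = 0x5, which
 * netlink_has_listeners() consults so senders can skip groups (here
 * group 2, mask 0x2) that nobody subscribed to.
 */
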
static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err = -EADDRINUSE;

	mutex_lock(&nl_sk_hash_lock);
	if (__netlink_lookup(table, portid, net))
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);
	rhashtable_insert(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL);
	err = 0;
err:
	mutex_unlock(&nl_sk_hash_lock);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	mutex_lock(&nl_sk_hash_lock);
	table = &nl_table[sk->sk_protocol];
	if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	mutex_unlock(&nl_sk_hash_lock);

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(int group);
	void (*unbind)(int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions) {
		netlink_update_listeners(sk);
	}
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	rcu_read_lock();
	if (__netlink_lookup(table, portid, net)) {
		/* Bind collision, search negative portid values. */
		portid = rover--;
		if (rover > -4097)
			rover = -4097;
		rcu_read_unlock();
		netlink_table_ungrab();
		goto retry;
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	err = netlink_insert(sk, net, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

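/* Editor's note (not part of the original source): autobind first tries
 * task_tgid_vnr(), i.e. the caller's process id, as the portid. On a
 * collision it hands out negative portids from the static rover, counting
 * down from -4097 (-4097, -4098, ...); the "rover > -4097" test only
 * fires when the s32 counter underflows and wraps positive, restarting
 * the search at -4097.
 */
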
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

1344 * netlink_ns_capable - General netlink message capability test
1345 * @skb: socket buffer holding a netlink command from userspace
1346 * @user_ns: The user namespace of the capability to use
1347 * @cap: The capability to use
1348 *
1349 * Test to see if the opener of the socket we received the message
1350 * from had when the netlink socket was created and the sender of the
1351 * message has has the capability @cap in the user namespace @user_ns.
1352 */
1353bool netlink_ns_capable(const struct sk_buff *skb,
1354 struct user_namespace *user_ns, int cap)
1355{
1356 return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
1357}
1358EXPORT_SYMBOL(netlink_ns_capable);
1359
1360/**
1361 * netlink_capable - Netlink global message capability test
1362 * @skb: socket buffer holding a netlink command from userspace
1363 * @cap: The capability to use
1364 *
1365 * Test to see if the opener of the socket we received the message
1366 * from had when the netlink socket was created and the sender of the
1367 * message has has the capability @cap in all user namespaces.
1368 */
1369bool netlink_capable(const struct sk_buff *skb, int cap)
1370{
1371 return netlink_ns_capable(skb, &init_user_ns, cap);
1372}
1373EXPORT_SYMBOL(netlink_capable);
1374
1375/**
1376 * netlink_net_capable - Netlink network namespace message capability test
1377 * @skb: socket buffer holding a netlink command from userspace
1378 * @cap: The capability to use
1379 *
1380 * Test to see if the opener of the socket we received the message
1381 * from had when the netlink socket was created and the sender of the
1382 * message has has the capability @cap over the network namespace of
1383 * the socket we received the message from.
1384 */
1385bool netlink_net_capable(const struct sk_buff *skb, int cap)
1386{
1387 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1388}
1389EXPORT_SYMBOL(netlink_net_capable);
1390
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001391static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001392{
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001393 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
Eric W. Biedermandf008c92012-11-16 03:03:07 +00001394 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001395}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001397static void
1398netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1399{
1400 struct netlink_sock *nlk = nlk_sk(sk);
1401
1402 if (nlk->subscriptions && !subscriptions)
1403 __sk_del_bind_node(sk);
1404 else if (!nlk->subscriptions && subscriptions)
1405 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1406 nlk->subscriptions = subscriptions;
1407}
1408
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001409static int netlink_realloc_groups(struct sock *sk)
Patrick McHardy513c2502005-09-06 15:43:59 -07001410{
1411 struct netlink_sock *nlk = nlk_sk(sk);
1412 unsigned int groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001413 unsigned long *new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001414 int err = 0;
1415
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001416 netlink_table_grab();
1417
Patrick McHardy513c2502005-09-06 15:43:59 -07001418 groups = nl_table[sk->sk_protocol].groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001419 if (!nl_table[sk->sk_protocol].registered) {
Patrick McHardy513c2502005-09-06 15:43:59 -07001420 err = -ENOENT;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001421 goto out_unlock;
1422 }
Patrick McHardy513c2502005-09-06 15:43:59 -07001423
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001424 if (nlk->ngroups >= groups)
1425 goto out_unlock;
Patrick McHardy513c2502005-09-06 15:43:59 -07001426
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001427 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1428 if (new_groups == NULL) {
1429 err = -ENOMEM;
1430 goto out_unlock;
1431 }
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001432 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001433 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1434
1435 nlk->groups = new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001436 nlk->ngroups = groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001437 out_unlock:
1438 netlink_table_ungrab();
1439 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001440}
1441
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001442static void netlink_unbind(int group, unsigned long groups,
1443 struct netlink_sock *nlk)
1444{
1445 int undo;
1446
1447 if (!nlk->netlink_unbind)
1448 return;
1449
1450 for (undo = 0; undo < group; undo++)
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09001451 if (test_bit(undo, &groups))
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001452 nlk->netlink_unbind(undo);
1453}
1454
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001455static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1456 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457{
1458 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001459 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 struct netlink_sock *nlk = nlk_sk(sk);
1461 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1462 int err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001463	unsigned long groups = nladdr->nl_groups;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001464
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001465 if (addr_len < sizeof(struct sockaddr_nl))
1466 return -EINVAL;
1467
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 if (nladdr->nl_family != AF_NETLINK)
1469 return -EINVAL;
1470
1471	/* Only superuser is allowed to listen to multicasts */
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001472 if (groups) {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001473 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001474 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001475 err = netlink_realloc_groups(sk);
1476 if (err)
1477 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001478 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001480 if (nlk->portid)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001481 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 return -EINVAL;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001483
1484 if (nlk->netlink_bind && groups) {
1485 int group;
1486
1487 for (group = 0; group < nlk->ngroups; group++) {
1488 if (!test_bit(group, &groups))
1489 continue;
1490 err = nlk->netlink_bind(group);
1491 if (!err)
1492 continue;
1493 netlink_unbind(group, groups, nlk);
1494 return err;
1495 }
1496 }
1497
1498 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 err = nladdr->nl_pid ?
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001500 netlink_insert(sk, net, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 netlink_autobind(sock);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001502 if (err) {
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09001503 netlink_unbind(nlk->ngroups, groups, nlk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 return err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001505 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 }
1507
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001508 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 return 0;
1510
1511 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001512 netlink_update_subscriptions(sk, nlk->subscriptions +
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001513 hweight32(groups) -
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001514 hweight32(nlk->groups[0]));
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001515 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001516 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 netlink_table_ungrab();
1518
1519 return 0;
1520}
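/*
 * Userspace view (illustrative only): the path above is what a process
 * exercises when it subscribes to multicast groups at bind() time, e.g.
 * for rtnetlink link notifications:
 *
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_LINK,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */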
1521
1522static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1523 int alen, int flags)
1524{
1525 int err = 0;
1526 struct sock *sk = sock->sk;
1527 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001528 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529
Changli Gao6503d962010-03-31 22:58:26 +00001530 if (alen < sizeof(addr->sa_family))
1531 return -EINVAL;
1532
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 if (addr->sa_family == AF_UNSPEC) {
1534 sk->sk_state = NETLINK_UNCONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001535 nlk->dst_portid = 0;
Patrick McHardyd629b832005-08-14 19:27:50 -07001536 nlk->dst_group = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 return 0;
1538 }
1539 if (addr->sa_family != AF_NETLINK)
1540 return -EINVAL;
1541
Mike Pecovnik46833a82014-02-24 21:11:16 +01001542 if ((nladdr->nl_groups || nladdr->nl_pid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001543 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 return -EPERM;
1545
Eric W. Biederman15e47302012-09-07 20:12:54 +00001546 if (!nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 err = netlink_autobind(sock);
1548
1549 if (err == 0) {
1550 sk->sk_state = NETLINK_CONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001551 nlk->dst_portid = nladdr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001552 nlk->dst_group = ffs(nladdr->nl_groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 }
1554
1555 return err;
1556}
1557
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001558static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1559 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560{
1561 struct sock *sk = sock->sk;
1562 struct netlink_sock *nlk = nlk_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001563 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 nladdr->nl_family = AF_NETLINK;
1566 nladdr->nl_pad = 0;
1567 *addr_len = sizeof(*nladdr);
1568
1569 if (peer) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001570 nladdr->nl_pid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001571 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001573 nladdr->nl_pid = nlk->portid;
Patrick McHardy513c2502005-09-06 15:43:59 -07001574 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 }
1576 return 0;
1577}
1578
Eric W. Biederman15e47302012-09-07 20:12:54 +00001579static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 struct sock *sock;
1582 struct netlink_sock *nlk;
1583
Eric W. Biederman15e47302012-09-07 20:12:54 +00001584 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 if (!sock)
1586 return ERR_PTR(-ECONNREFUSED);
1587
1588 /* Don't bother queuing skb if kernel socket has no input function */
1589 nlk = nlk_sk(sock);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001590 if (sock->sk_state == NETLINK_CONNECTED &&
Eric W. Biederman15e47302012-09-07 20:12:54 +00001591 nlk->dst_portid != nlk_sk(ssk)->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 sock_put(sock);
1593 return ERR_PTR(-ECONNREFUSED);
1594 }
1595 return sock;
1596}
1597
1598struct sock *netlink_getsockbyfilp(struct file *filp)
1599{
Al Viro496ad9a2013-01-23 17:07:38 -05001600 struct inode *inode = file_inode(filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 struct sock *sock;
1602
1603 if (!S_ISSOCK(inode->i_mode))
1604 return ERR_PTR(-ENOTSOCK);
1605
1606 sock = SOCKET_I(inode)->sk;
1607 if (sock->sk_family != AF_NETLINK)
1608 return ERR_PTR(-EINVAL);
1609
1610 sock_hold(sock);
1611 return sock;
1612}
1613
Pablo Neira3a365152013-06-28 03:04:23 +02001614static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1615 int broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001616{
1617 struct sk_buff *skb;
1618 void *data;
1619
Pablo Neira3a365152013-06-28 03:04:23 +02001620 if (size <= NLMSG_GOODSIZE || broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001621 return alloc_skb(size, GFP_KERNEL);
1622
Pablo Neira3a365152013-06-28 03:04:23 +02001623 size = SKB_DATA_ALIGN(size) +
1624 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001625
1626 data = vmalloc(size);
1627 if (data == NULL)
Pablo Neira3a365152013-06-28 03:04:23 +02001628 return NULL;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001629
Eric Dumazet0ff99ba2015-04-24 16:05:01 -07001630 skb = __build_skb(data, size);
Pablo Neira3a365152013-06-28 03:04:23 +02001631 if (skb == NULL)
1632 vfree(data);
Eric Dumazet0ff99ba2015-04-24 16:05:01 -07001633 else
Pablo Neira3a365152013-06-28 03:04:23 +02001634 skb->destructor = netlink_skb_destructor;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001635
1636 return skb;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001637}
1638
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639/*
1640 * Attach a skb to a netlink socket.
1641 * The caller must hold a reference to the destination socket. On error, the
1642 * reference is dropped. The skb is not sent to the destination; only
1643 * error checks are performed and memory in the queue is reserved.
1644 * Return values:
1645 * < 0: error. skb freed, reference to sock dropped.
1646 * 0: continue
1647 * 1: repeat lookup - reference dropped while waiting for socket memory.
1648 */
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001649int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001650 long *timeo, struct sock *ssk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651{
1652 struct netlink_sock *nlk;
1653
1654 nlk = nlk_sk(sk);
1655
Patrick McHardy5fd96122013-04-17 06:47:03 +00001656 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1657 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1658 !netlink_skb_is_mmaped(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 DECLARE_WAITQUEUE(wait, current);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001660 if (!*timeo) {
Denis V. Lunevaed81562007-10-10 21:14:32 -07001661 if (!ssk || netlink_is_kernel(ssk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 netlink_overrun(sk);
1663 sock_put(sk);
1664 kfree_skb(skb);
1665 return -EAGAIN;
1666 }
1667
1668 __set_current_state(TASK_INTERRUPTIBLE);
1669 add_wait_queue(&nlk->wait, &wait);
1670
1671 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Patrick McHardycd967e02013-04-17 06:46:56 +00001672 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 !sock_flag(sk, SOCK_DEAD))
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001674 *timeo = schedule_timeout(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675
1676 __set_current_state(TASK_RUNNING);
1677 remove_wait_queue(&nlk->wait, &wait);
1678 sock_put(sk);
1679
1680 if (signal_pending(current)) {
1681 kfree_skb(skb);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001682 return sock_intr_errno(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 }
1684 return 1;
1685 }
Patrick McHardycf0a0182013-04-17 06:47:00 +00001686 netlink_skb_set_owner_r(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 return 0;
1688}
1689
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001690static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 int len = skb->len;
1693
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001694 netlink_deliver_tap(skb);
1695
Patrick McHardyf9c22882013-04-17 06:47:04 +00001696#ifdef CONFIG_NETLINK_MMAP
1697 if (netlink_skb_is_mmaped(skb))
1698 netlink_queue_mmaped_skb(sk, skb);
1699 else if (netlink_rx_is_mmaped(sk))
1700 netlink_ring_set_copied(sk, skb);
1701 else
1702#endif /* CONFIG_NETLINK_MMAP */
1703 skb_queue_tail(&sk->sk_receive_queue, skb);
David S. Miller676d2362014-04-11 16:15:36 -04001704 sk->sk_data_ready(sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001705 return len;
1706}
1707
1708int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1709{
1710 int len = __netlink_sendskb(sk, skb);
1711
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 sock_put(sk);
1713 return len;
1714}
1715
1716void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1717{
1718 kfree_skb(skb);
1719 sock_put(sk);
1720}
1721
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001722static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723{
1724 int delta;
1725
Patrick McHardy1298ca42013-04-17 06:46:59 +00001726 WARN_ON(skb->sk != NULL);
Patrick McHardy5fd96122013-04-17 06:47:03 +00001727 if (netlink_skb_is_mmaped(skb))
1728 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001730 delta = skb->end - skb->tail;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001731 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 return skb;
1733
1734 if (skb_shared(skb)) {
1735 struct sk_buff *nskb = skb_clone(skb, allocation);
1736 if (!nskb)
1737 return skb;
Eric Dumazet8460c002012-04-19 02:24:28 +00001738 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 skb = nskb;
1740 }
1741
1742 if (!pskb_expand_head(skb, 0, -delta, allocation))
1743 skb->truesize -= delta;
1744
1745 return skb;
1746}
1747
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001748static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1749 struct sock *ssk)
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001750{
1751 int ret;
1752 struct netlink_sock *nlk = nlk_sk(sk);
1753
1754 ret = -ECONNREFUSED;
1755 if (nlk->netlink_rcv != NULL) {
1756 ret = skb->len;
Patrick McHardycf0a0182013-04-17 06:47:00 +00001757 netlink_skb_set_owner_r(skb, sk);
Patrick McHardye32123e2013-04-17 06:46:57 +00001758 NETLINK_CB(skb).sk = ssk;
Daniel Borkmann73bfd372013-12-23 14:35:55 +01001759 netlink_deliver_tap_kernel(sk, ssk, skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001760 nlk->netlink_rcv(skb);
Eric Dumazetbfb253c2012-04-22 21:30:29 +00001761 consume_skb(skb);
1762 } else {
1763 kfree_skb(skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001764 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001765 sock_put(sk);
1766 return ret;
1767}
1768
1769int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001770 u32 portid, int nonblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771{
1772 struct sock *sk;
1773 int err;
1774 long timeo;
1775
1776 skb = netlink_trim(skb, gfp_any());
1777
1778 timeo = sock_sndtimeo(ssk, nonblock);
1779retry:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001780 sk = netlink_getsockbyportid(ssk, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 if (IS_ERR(sk)) {
1782 kfree_skb(skb);
1783 return PTR_ERR(sk);
1784 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001785 if (netlink_is_kernel(sk))
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001786 return netlink_unicast_kernel(sk, skb, ssk);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001787
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001788 if (sk_filter(sk, skb)) {
Wang Chen84874602008-07-01 19:55:09 -07001789 err = skb->len;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001790 kfree_skb(skb);
1791 sock_put(sk);
1792 return err;
1793 }
1794
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001795 err = netlink_attachskb(sk, skb, &timeo, ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 if (err == 1)
1797 goto retry;
1798 if (err)
1799 return err;
1800
Denis V. Lunev7ee015e2007-10-10 21:14:03 -07001801 return netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001803EXPORT_SYMBOL(netlink_unicast);
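/*
 * Usage sketch (illustrative only): a kernel subsystem answering a
 * request usually builds a reply skb and unicasts it back to the
 * requester's portid taken from the incoming skb:
 *
 *	rep = nlmsg_new(payload, GFP_KERNEL);
 *	if (!rep)
 *		return -ENOMEM;
 *	... fill in the message ...
 *	return netlink_unicast(sk, rep, NETLINK_CB(in_skb).portid,
 *			       MSG_DONTWAIT);
 */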
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
Patrick McHardyf9c22882013-04-17 06:47:04 +00001805struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1806 u32 dst_portid, gfp_t gfp_mask)
1807{
1808#ifdef CONFIG_NETLINK_MMAP
1809 struct sock *sk = NULL;
1810 struct sk_buff *skb;
1811 struct netlink_ring *ring;
1812 struct nl_mmap_hdr *hdr;
1813 unsigned int maxlen;
1814
1815 sk = netlink_getsockbyportid(ssk, dst_portid);
1816 if (IS_ERR(sk))
1817 goto out;
1818
1819 ring = &nlk_sk(sk)->rx_ring;
1820 /* fast-path without atomic ops for common case: non-mmaped receiver */
1821 if (ring->pg_vec == NULL)
1822 goto out_put;
1823
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001824 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1825 goto out_put;
1826
Patrick McHardyf9c22882013-04-17 06:47:04 +00001827 skb = alloc_skb_head(gfp_mask);
1828 if (skb == NULL)
1829 goto err1;
1830
1831 spin_lock_bh(&sk->sk_receive_queue.lock);
1832 /* check again under lock */
1833 if (ring->pg_vec == NULL)
1834 goto out_free;
1835
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001836	/* re-check the frame size under the lock */
Patrick McHardyf9c22882013-04-17 06:47:04 +00001837 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1838 if (maxlen < size)
1839 goto out_free;
1840
1841 netlink_forward_ring(ring);
1842 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1843 if (hdr == NULL)
1844 goto err2;
1845 netlink_ring_setup_skb(skb, sk, ring, hdr);
1846 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1847 atomic_inc(&ring->pending);
1848 netlink_increment_head(ring);
1849
1850 spin_unlock_bh(&sk->sk_receive_queue.lock);
1851 return skb;
1852
1853err2:
1854 kfree_skb(skb);
1855 spin_unlock_bh(&sk->sk_receive_queue.lock);
Patrick McHardycd1df522013-04-17 06:47:05 +00001856 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +00001857err1:
1858 sock_put(sk);
1859 return NULL;
1860
1861out_free:
1862 kfree_skb(skb);
1863 spin_unlock_bh(&sk->sk_receive_queue.lock);
1864out_put:
1865 sock_put(sk);
1866out:
1867#endif
1868 return alloc_skb(size, gfp_mask);
1869}
1870EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1871
Patrick McHardy4277a082006-03-20 18:52:01 -08001872int netlink_has_listeners(struct sock *sk, unsigned int group)
1873{
1874 int res = 0;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001875 struct listeners *listeners;
Patrick McHardy4277a082006-03-20 18:52:01 -08001876
Denis V. Lunevaed81562007-10-10 21:14:32 -07001877 BUG_ON(!netlink_is_kernel(sk));
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001878
1879 rcu_read_lock();
1880 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1881
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001882 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001883 res = test_bit(group - 1, listeners->masks);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001884
1885 rcu_read_unlock();
1886
Patrick McHardy4277a082006-03-20 18:52:01 -08001887 return res;
1888}
1889EXPORT_SYMBOL_GPL(netlink_has_listeners);
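/*
 * Usage sketch (illustrative only): event producers commonly use this
 * to skip building a notification nobody has subscribed to;
 * "MYPROTO_GRP_EVENTS" is a hypothetical group:
 *
 *	if (!netlink_has_listeners(sk, MYPROTO_GRP_EVENTS))
 *		return;
 */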
1890
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001891static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892{
1893 struct netlink_sock *nlk = nlk_sk(sk);
1894
1895 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
Patrick McHardycd967e02013-04-17 06:46:56 +00001896 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
Patrick McHardycf0a0182013-04-17 06:47:00 +00001897 netlink_skb_set_owner_r(skb, sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001898 __netlink_sendskb(sk, skb);
stephen hemminger2c6458002011-12-22 08:52:03 +00001899 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 }
1901 return -1;
1902}
1903
1904struct netlink_broadcast_data {
1905 struct sock *exclude_sk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001906 struct net *net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001907 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 u32 group;
1909 int failure;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001910 int delivery_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 int congested;
1912 int delivered;
Al Viro7d877f32005-10-21 03:20:43 -04001913 gfp_t allocation;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 struct sk_buff *skb, *skb2;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001915 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1916 void *tx_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917};
1918
Rami Rosen46c95212014-07-01 21:17:35 +03001919static void do_one_broadcast(struct sock *sk,
1920 struct netlink_broadcast_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921{
1922 struct netlink_sock *nlk = nlk_sk(sk);
1923 int val;
1924
1925 if (p->exclude_sk == sk)
Rami Rosen46c95212014-07-01 21:17:35 +03001926 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
Eric W. Biederman15e47302012-09-07 20:12:54 +00001928 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001929 !test_bit(p->group - 1, nlk->groups))
Rami Rosen46c95212014-07-01 21:17:35 +03001930 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001932 if (!net_eq(sock_net(sk), p->net))
Rami Rosen46c95212014-07-01 21:17:35 +03001933 return;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001934
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 if (p->failure) {
1936 netlink_overrun(sk);
Rami Rosen46c95212014-07-01 21:17:35 +03001937 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 }
1939
1940 sock_hold(sk);
1941 if (p->skb2 == NULL) {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001942 if (skb_shared(p->skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 p->skb2 = skb_clone(p->skb, p->allocation);
1944 } else {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001945 p->skb2 = skb_get(p->skb);
1946 /*
1947 * skb ownership may have been set when
1948 * delivered to a previous socket.
1949 */
1950 skb_orphan(p->skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 }
1952 }
1953 if (p->skb2 == NULL) {
1954 netlink_overrun(sk);
1955 /* Clone failed. Notify ALL listeners. */
1956 p->failure = 1;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001957 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1958 p->delivery_failure = 1;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001959 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1960 kfree_skb(p->skb2);
1961 p->skb2 = NULL;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001962 } else if (sk_filter(sk, p->skb2)) {
1963 kfree_skb(p->skb2);
1964 p->skb2 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1966 netlink_overrun(sk);
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001967 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1968 p->delivery_failure = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 } else {
1970 p->congested |= val;
1971 p->delivered = 1;
1972 p->skb2 = NULL;
1973 }
1974 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975}
1976
Eric W. Biederman15e47302012-09-07 20:12:54 +00001977int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001978 u32 group, gfp_t allocation,
1979 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1980 void *filter_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001982 struct net *net = sock_net(ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 struct netlink_broadcast_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 struct sock *sk;
1985
1986 skb = netlink_trim(skb, allocation);
1987
1988 info.exclude_sk = ssk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001989 info.net = net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001990 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 info.group = group;
1992 info.failure = 0;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001993 info.delivery_failure = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 info.congested = 0;
1995 info.delivered = 0;
1996 info.allocation = allocation;
1997 info.skb = skb;
1998 info.skb2 = NULL;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001999 info.tx_filter = filter;
2000 info.tx_data = filter_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
2002	/* While we sleep in clone, do not allow the socket list to change */
2003
2004 netlink_lock_table();
2005
Sasha Levinb67bfe02013-02-27 17:06:00 -08002006 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 do_one_broadcast(sk, &info);
2008
Neil Horman70d4bf62010-07-20 06:45:56 +00002009 consume_skb(skb);
Tommy S. Christensenaa1c6a6f2005-05-19 13:07:32 -07002010
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 netlink_unlock_table();
2012
Neil Horman70d4bf62010-07-20 06:45:56 +00002013 if (info.delivery_failure) {
2014 kfree_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002015 return -ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002016 }
2017 consume_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002018
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 if (info.delivered) {
2020 if (info.congested && (allocation & __GFP_WAIT))
2021 yield();
2022 return 0;
2023 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 return -ESRCH;
2025}
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002026EXPORT_SYMBOL(netlink_broadcast_filtered);
2027
Eric W. Biederman15e47302012-09-07 20:12:54 +00002028int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002029 u32 group, gfp_t allocation)
2030{
Eric W. Biederman15e47302012-09-07 20:12:54 +00002031 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002032 NULL, NULL);
2033}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002034EXPORT_SYMBOL(netlink_broadcast);
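/*
 * Usage sketch (illustrative only, "MYPROTO_GRP_EVENTS" hypothetical):
 * broadcasting an event to a group; -ESRCH from the function above just
 * means there were no listeners and is usually not treated as an error:
 *
 *	err = netlink_broadcast(sk, skb, 0, MYPROTO_GRP_EVENTS,
 *				GFP_KERNEL);
 *	if (err && err != -ESRCH)
 *		pr_warn("event delivery failed: %d\n", err);
 */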
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036struct netlink_set_err_data {
2037 struct sock *exclude_sk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002038 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 u32 group;
2040 int code;
2041};
2042
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00002043static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044{
2045 struct netlink_sock *nlk = nlk_sk(sk);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002046 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047
2048 if (sk == p->exclude_sk)
2049 goto out;
2050
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002051 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002052 goto out;
2053
Eric W. Biederman15e47302012-09-07 20:12:54 +00002054 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07002055 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 goto out;
2057
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002058 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2059 ret = 1;
2060 goto out;
2061 }
2062
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 sk->sk_err = p->code;
2064 sk->sk_error_report(sk);
2065out:
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002066 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067}
2068
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002069/**
2070 * netlink_set_err - report error to broadcast listeners
2071 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
Eric W. Biederman15e47302012-09-07 20:12:54 +00002072 * @portid: the PORTID of a process that we want to skip (if any)
Johannes Berg840e93f22013-11-19 10:35:40 +01002073 * @group: the broadcast group that will notice the error
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002074 * @code: error code, must be negative (as usual in kernelspace)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002075 *
2076 * This function returns the number of broadcast listeners that have set the
2077 * NETLINK_RECV_NO_ENOBUFS socket option.
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002078 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002079int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080{
2081 struct netlink_set_err_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 struct sock *sk;
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002083 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
2085 info.exclude_sk = ssk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002086 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 info.group = group;
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002088 /* sk->sk_err wants a positive error value */
2089 info.code = -code;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
2091 read_lock(&nl_table_lock);
2092
Sasha Levinb67bfe02013-02-27 17:06:00 -08002093 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002094 ret += do_one_set_err(sk, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
2096 read_unlock(&nl_table_lock);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002097 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098}
Pablo Neira Ayusodd5b6ce2009-03-23 13:21:06 +01002099EXPORT_SYMBOL(netlink_set_err);
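/*
 * Usage sketch (illustrative only): a caller that could not allocate a
 * broadcast message may still report the loss to the group, passing a
 * negative code as documented above; "MYPROTO_GRP_EVENTS" is a
 * hypothetical group:
 *
 *	netlink_set_err(sk, 0, MYPROTO_GRP_EVENTS, -ENOBUFS);
 */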
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100
Johannes Berg84659eb2007-07-18 15:47:05 -07002101/* must be called with netlink table grabbed */
2102static void netlink_update_socket_mc(struct netlink_sock *nlk,
2103 unsigned int group,
2104 int is_new)
2105{
2106 int old, new = !!is_new, subscriptions;
2107
2108 old = test_bit(group - 1, nlk->groups);
2109 subscriptions = nlk->subscriptions - old + new;
2110 if (new)
2111 __set_bit(group - 1, nlk->groups);
2112 else
2113 __clear_bit(group - 1, nlk->groups);
2114 netlink_update_subscriptions(&nlk->sk, subscriptions);
2115 netlink_update_listeners(&nlk->sk);
2116}
2117
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002118static int netlink_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002119 char __user *optval, unsigned int optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002120{
2121 struct sock *sk = sock->sk;
2122 struct netlink_sock *nlk = nlk_sk(sk);
Johannes Bergeb496532007-07-18 02:07:51 -07002123 unsigned int val = 0;
2124 int err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002125
2126 if (level != SOL_NETLINK)
2127 return -ENOPROTOOPT;
2128
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002129 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2130 optlen >= sizeof(int) &&
Johannes Bergeb496532007-07-18 02:07:51 -07002131 get_user(val, (unsigned int __user *)optval))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002132 return -EFAULT;
2133
2134 switch (optname) {
2135 case NETLINK_PKTINFO:
2136 if (val)
2137 nlk->flags |= NETLINK_RECV_PKTINFO;
2138 else
2139 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2140 err = 0;
2141 break;
2142 case NETLINK_ADD_MEMBERSHIP:
2143 case NETLINK_DROP_MEMBERSHIP: {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002144 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002145 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002146 err = netlink_realloc_groups(sk);
2147 if (err)
2148 return err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002149 if (!val || val - 1 >= nlk->ngroups)
2150 return -EINVAL;
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002151 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
Richard Guy Briggs4f520902014-04-22 21:31:54 -04002152 err = nlk->netlink_bind(val);
2153 if (err)
2154 return err;
2155 }
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002156 netlink_table_grab();
Johannes Berg84659eb2007-07-18 15:47:05 -07002157 netlink_update_socket_mc(nlk, val,
2158 optname == NETLINK_ADD_MEMBERSHIP);
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002159 netlink_table_ungrab();
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002160 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
2161 nlk->netlink_unbind(val);
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00002162
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002163 err = 0;
2164 break;
2165 }
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002166 case NETLINK_BROADCAST_ERROR:
2167 if (val)
2168 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2169 else
2170 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2171 err = 0;
2172 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002173 case NETLINK_NO_ENOBUFS:
2174 if (val) {
2175 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
Patrick McHardycd967e02013-04-17 06:46:56 +00002176 clear_bit(NETLINK_CONGESTED, &nlk->state);
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002177 wake_up_interruptible(&nlk->wait);
Eric Dumazet658cb352012-04-22 21:30:21 +00002178 } else {
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002179 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002180 }
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002181 err = 0;
2182 break;
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002183#ifdef CONFIG_NETLINK_MMAP
2184 case NETLINK_RX_RING:
2185 case NETLINK_TX_RING: {
2186 struct nl_mmap_req req;
2187
2188		/* Rings might consume more memory than queue limits, so require
2189 * CAP_NET_ADMIN.
2190 */
2191 if (!capable(CAP_NET_ADMIN))
2192 return -EPERM;
2193 if (optlen < sizeof(req))
2194 return -EINVAL;
2195 if (copy_from_user(&req, optval, sizeof(req)))
2196 return -EFAULT;
Florian Westphal6c897d82015-07-21 16:33:50 +02002197 err = netlink_set_ring(sk, &req,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002198 optname == NETLINK_TX_RING);
2199 break;
2200 }
2201#endif /* CONFIG_NETLINK_MMAP */
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002202 default:
2203 err = -ENOPROTOOPT;
2204 }
2205 return err;
2206}
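/*
 * Userspace view (illustrative only): joining and leaving a multicast
 * group after bind() goes through the membership options handled above:
 *
 *	unsigned int group = 1;    (any group valid for the protocol)
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &group, sizeof(group));
 *	...
 *	setsockopt(fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
 *		   &group, sizeof(group));
 */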
2207
2208static int netlink_getsockopt(struct socket *sock, int level, int optname,
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002209 char __user *optval, int __user *optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002210{
2211 struct sock *sk = sock->sk;
2212 struct netlink_sock *nlk = nlk_sk(sk);
2213 int len, val, err;
2214
2215 if (level != SOL_NETLINK)
2216 return -ENOPROTOOPT;
2217
2218 if (get_user(len, optlen))
2219 return -EFAULT;
2220 if (len < 0)
2221 return -EINVAL;
2222
2223 switch (optname) {
2224 case NETLINK_PKTINFO:
2225 if (len < sizeof(int))
2226 return -EINVAL;
2227 len = sizeof(int);
2228 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
Heiko Carstensa27b58f2006-10-30 15:06:12 -08002229 if (put_user(len, optlen) ||
2230 put_user(val, optval))
2231 return -EFAULT;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002232 err = 0;
2233 break;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002234 case NETLINK_BROADCAST_ERROR:
2235 if (len < sizeof(int))
2236 return -EINVAL;
2237 len = sizeof(int);
2238 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2239 if (put_user(len, optlen) ||
2240 put_user(val, optval))
2241 return -EFAULT;
2242 err = 0;
2243 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002244 case NETLINK_NO_ENOBUFS:
2245 if (len < sizeof(int))
2246 return -EINVAL;
2247 len = sizeof(int);
2248 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2249 if (put_user(len, optlen) ||
2250 put_user(val, optval))
2251 return -EFAULT;
2252 err = 0;
2253 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002254 default:
2255 err = -ENOPROTOOPT;
2256 }
2257 return err;
2258}
2259
2260static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2261{
2262 struct nl_pktinfo info;
2263
2264 info.group = NETLINK_CB(skb).dst_group;
2265 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2266}
2267
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2269 struct msghdr *msg, size_t len)
2270{
2271 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2272 struct sock *sk = sock->sk;
2273 struct netlink_sock *nlk = nlk_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002274 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002275 u32 dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002276 u32 dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 struct sk_buff *skb;
2278 int err;
2279 struct scm_cookie scm;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002280 u32 netlink_skb_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
2282 if (msg->msg_flags&MSG_OOB)
2283 return -EOPNOTSUPP;
2284
Eric Dumazet16e57262011-09-19 05:52:27 +00002285 if (NULL == siocb->scm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 siocb->scm = &scm;
Eric Dumazet16e57262011-09-19 05:52:27 +00002287
Eric Dumazete0e3cea2012-08-21 06:21:17 +00002288 err = scm_send(sock, msg, siocb->scm, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 if (err < 0)
2290 return err;
2291
2292 if (msg->msg_namelen) {
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002293 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 if (addr->nl_family != AF_NETLINK)
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002295 goto out;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002296 dst_portid = addr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002297 dst_group = ffs(addr->nl_groups);
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002298 err = -EPERM;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002299 if ((dst_group || dst_portid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002300 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002301 goto out;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002302 netlink_skb_flags |= NETLINK_SKB_DST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002304 dst_portid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002305 dst_group = nlk->dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 }
2307
Eric W. Biederman15e47302012-09-07 20:12:54 +00002308 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 err = netlink_autobind(sock);
2310 if (err)
2311 goto out;
2312 }
2313
Patrick McHardy5fd96122013-04-17 06:47:03 +00002314 if (netlink_tx_is_mmaped(sk) &&
2315 msg->msg_iov->iov_base == NULL) {
2316 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2317 siocb);
2318 goto out;
2319 }
2320
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 err = -EMSGSIZE;
2322 if (len > sk->sk_sndbuf - 32)
2323 goto out;
2324 err = -ENOBUFS;
Pablo Neira3a365152013-06-28 03:04:23 +02002325 skb = netlink_alloc_large_skb(len, dst_group);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002326 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 goto out;
2328
Eric W. Biederman15e47302012-09-07 20:12:54 +00002329 NETLINK_CB(skb).portid = nlk->portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002330 NETLINK_CB(skb).dst_group = dst_group;
Eric W. Biedermandbe9a412012-09-06 18:20:01 +00002331 NETLINK_CB(skb).creds = siocb->scm->creds;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002332 NETLINK_CB(skb).flags = netlink_skb_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 err = -EFAULT;
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002335 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 kfree_skb(skb);
2337 goto out;
2338 }
2339
2340 err = security_netlink_send(sk, skb);
2341 if (err) {
2342 kfree_skb(skb);
2343 goto out;
2344 }
2345
Patrick McHardyd629b832005-08-14 19:27:50 -07002346 if (dst_group) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002348 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00002350 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351
2352out:
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002353 scm_destroy(siocb->scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 return err;
2355}
2356
2357static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2358 struct msghdr *msg, size_t len,
2359 int flags)
2360{
2361 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2362 struct scm_cookie scm;
2363 struct sock *sk = sock->sk;
2364 struct netlink_sock *nlk = nlk_sk(sk);
2365 int noblock = flags&MSG_DONTWAIT;
2366 size_t copied;
Johannes Berg68d6ac62010-08-15 21:20:44 +00002367 struct sk_buff *skb, *data_skb;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002368 int err, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
2370 if (flags&MSG_OOB)
2371 return -EOPNOTSUPP;
2372
2373 copied = 0;
2374
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002375 skb = skb_recv_datagram(sk, flags, noblock, &err);
2376 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 goto out;
2378
Johannes Berg68d6ac62010-08-15 21:20:44 +00002379 data_skb = skb;
2380
Johannes Berg1dacc762009-07-01 11:26:02 +00002381#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2382 if (unlikely(skb_shinfo(skb)->frag_list)) {
Johannes Berg1dacc762009-07-01 11:26:02 +00002383 /*
Johannes Berg68d6ac62010-08-15 21:20:44 +00002384		 * If this skb has a frag_list, it means that we
2385 * will have to use the frag_list skb's data for compat tasks
2386 * and the regular skb's data for normal (non-compat) tasks.
Johannes Berg1dacc762009-07-01 11:26:02 +00002387 *
Johannes Berg68d6ac62010-08-15 21:20:44 +00002388 * If we need to send the compat skb, assign it to the
2389 * 'data_skb' variable so that it will be used below for data
2390 * copying. We keep 'skb' for everything else, including
2391 * freeing both later.
Johannes Berg1dacc762009-07-01 11:26:02 +00002392 */
Johannes Berg68d6ac62010-08-15 21:20:44 +00002393 if (flags & MSG_CMSG_COMPAT)
2394 data_skb = skb_shinfo(skb)->frag_list;
Johannes Berg1dacc762009-07-01 11:26:02 +00002395 }
2396#endif
2397
Eric Dumazet9063e212014-03-07 12:02:33 -08002398 /* Record the max length of recvmsg() calls for future allocations */
2399 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2400 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2401 16384);
2402
Johannes Berg68d6ac62010-08-15 21:20:44 +00002403 copied = data_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 if (len < copied) {
2405 msg->msg_flags |= MSG_TRUNC;
2406 copied = len;
2407 }
2408
Johannes Berg68d6ac62010-08-15 21:20:44 +00002409 skb_reset_transport_header(data_skb);
2410 err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
2412 if (msg->msg_name) {
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002413 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 addr->nl_family = AF_NETLINK;
2415 addr->nl_pad = 0;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002416 addr->nl_pid = NETLINK_CB(skb).portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002417 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 msg->msg_namelen = sizeof(*addr);
2419 }
2420
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002421 if (nlk->flags & NETLINK_RECV_PKTINFO)
2422 netlink_cmsg_recv_pktinfo(msg, skb);
2423
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 if (NULL == siocb->scm) {
2425 memset(&scm, 0, sizeof(scm));
2426 siocb->scm = &scm;
2427 }
2428 siocb->scm->creds = *NETLINK_CREDS(skb);
Patrick McHardy188ccb52007-05-03 03:27:01 -07002429 if (flags & MSG_TRUNC)
Johannes Berg68d6ac62010-08-15 21:20:44 +00002430 copied = data_skb->len;
David S. Millerdaa37662010-08-15 23:21:50 -07002431
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 skb_free_datagram(sk, skb);
2433
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002434 if (nlk->cb_running &&
2435 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
Andrey Vaginb44d2112011-02-21 02:40:47 +00002436 ret = netlink_dump(sk);
2437 if (ret) {
Ben Pfaffac30ef82014-07-09 10:31:22 -07002438 sk->sk_err = -ret;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002439 sk->sk_error_report(sk);
2440 }
2441 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442
2443 scm_recv(sock, msg, siocb->scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444out:
2445 netlink_rcv_wake(sk);
2446 return err ? : copied;
2447}
2448
David S. Miller676d2362014-04-11 16:15:36 -04002449static void netlink_data_ready(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450{
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002451 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452}
2453
2454/*
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002455 * We export these functions to other modules. They provide a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 * complete set of kernel non-blocking support for message
2457 * queueing.
2458 */
2459
2460struct sock *
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002461__netlink_kernel_create(struct net *net, int unit, struct module *module,
2462 struct netlink_kernel_cfg *cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463{
2464 struct socket *sock;
2465 struct sock *sk;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002466 struct netlink_sock *nlk;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002467 struct listeners *listeners = NULL;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002468 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2469 unsigned int groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470
Akinobu Mitafab2caf2006-08-29 02:15:24 -07002471 BUG_ON(!nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002473 if (unit < 0 || unit >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 return NULL;
2475
2476 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2477 return NULL;
2478
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002479 /*
2480	 * We just have to hold a reference on the net from sk, but we don't
2481	 * get_net it. Besides, we cannot get and then put the net here.
2482	 * So we create one inside init_net and then move it to net.
2483 */
2484
2485 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2486 goto out_sock_release_nosk;
2487
2488 sk = sock->sk;
Denis V. Lunevedf02082008-02-29 11:18:32 -08002489 sk_change_net(sk, net);
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002490
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002491 if (!cfg || cfg->groups < 32)
Patrick McHardy4277a082006-03-20 18:52:01 -08002492 groups = 32;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002493 else
2494 groups = cfg->groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08002495
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002496 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
Patrick McHardy4277a082006-03-20 18:52:01 -08002497 if (!listeners)
2498 goto out_sock_release;
2499
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 sk->sk_data_ready = netlink_data_ready;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002501 if (cfg && cfg->input)
2502 nlk_sk(sk)->netlink_rcv = cfg->input;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002504 if (netlink_insert(sk, net, 0))
Patrick McHardy77247bb2005-08-14 19:27:13 -07002505 goto out_sock_release;
2506
2507 nlk = nlk_sk(sk);
2508 nlk->flags |= NETLINK_KERNEL_SOCKET;
2509
2510 netlink_table_grab();
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002511 if (!nl_table[unit].registered) {
2512 nl_table[unit].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002513 rcu_assign_pointer(nl_table[unit].listeners, listeners);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002514 nl_table[unit].cb_mutex = cb_mutex;
2515 nl_table[unit].module = module;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002516 if (cfg) {
2517 nl_table[unit].bind = cfg->bind;
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09002518 nl_table[unit].unbind = cfg->unbind;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002519 nl_table[unit].flags = cfg->flags;
Gao fengda12c902013-06-06 14:49:11 +08002520 if (cfg->compare)
2521 nl_table[unit].compare = cfg->compare;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002522 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002523 nl_table[unit].registered = 1;
Jesper Juhlf937f1f462007-10-15 01:39:12 -07002524 } else {
2525 kfree(listeners);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08002526 nl_table[unit].registered++;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002527 }
Patrick McHardy77247bb2005-08-14 19:27:13 -07002528 netlink_table_ungrab();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002529 return sk;
2530
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002531out_sock_release:
Patrick McHardy4277a082006-03-20 18:52:01 -08002532 kfree(listeners);
Denis V. Lunev9dfbec12008-02-29 11:17:56 -08002533 netlink_kernel_release(sk);
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002534 return NULL;
2535
2536out_sock_release_nosk:
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002537 sock_release(sock);
Patrick McHardy77247bb2005-08-14 19:27:13 -07002538 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539}
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002540EXPORT_SYMBOL(__netlink_kernel_create);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002541
2542void
2543netlink_kernel_release(struct sock *sk)
2544{
Denis V. Lunevedf02082008-02-29 11:18:32 -08002545 sk_release_kernel(sk);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002546}
2547EXPORT_SYMBOL(netlink_kernel_release);
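/*
 * Usage sketch (illustrative only): kernel-side sockets are normally
 * created through the netlink_kernel_create() wrapper around
 * __netlink_kernel_create(); "my_input" and "NETLINK_MYPROTO" are
 * hypothetical names:
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.input	= my_input,
 *	};
 *	struct sock *nlsk = netlink_kernel_create(&init_net,
 *						  NETLINK_MYPROTO, &cfg);
 *	...
 *	netlink_kernel_release(nlsk);
 */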
2548
Johannes Bergd136f1b2009-09-12 03:03:15 +00002549int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002550{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002551 struct listeners *new, *old;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002552 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002553
2554 if (groups < 32)
2555 groups = 32;
2556
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002557 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002558 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2559 if (!new)
Johannes Bergd136f1b2009-09-12 03:03:15 +00002560 return -ENOMEM;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00002561 old = nl_deref_protected(tbl->listeners);
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002562 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2563 rcu_assign_pointer(tbl->listeners, new);
2564
Lai Jiangshan37b6b932011-03-15 18:01:42 +08002565 kfree_rcu(old, rcu);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002566 }
2567 tbl->groups = groups;
2568
Johannes Bergd136f1b2009-09-12 03:03:15 +00002569 return 0;
2570}
2571
2572/**
2573 * netlink_change_ngroups - change number of multicast groups
2574 *
2575 * This changes the number of multicast groups that are available
2576 * on a certain netlink family. Note that it is not possible to
2577 * change the number of groups to below 32. Also note that it does
2578 * not implicitly call netlink_clear_multicast_users() when the
2579 * number of groups is reduced.
2580 *
2581 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2582 * @groups: The new number of groups.
2583 */
2584int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2585{
2586 int err;
2587
2588 netlink_table_grab();
2589 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002590 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002591
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002592 return err;
2593}
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002594
Johannes Bergb8273572009-09-24 15:44:05 -07002595void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2596{
2597 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002598 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2599
Sasha Levinb67bfe02013-02-27 17:06:00 -08002600 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002601 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2602}
2603
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002604struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002605__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002606{
2607 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002608 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002609
Wang Yufen23b45672014-02-17 16:53:32 +08002610 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002611 nlh->nlmsg_type = type;
2612 nlh->nlmsg_len = size;
2613 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002614 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002615 nlh->nlmsg_seq = seq;
2616 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002617 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002618 return nlh;
2619}
2620EXPORT_SYMBOL(__nlmsg_put);
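/*
 * Usage sketch (illustrative only): callers normally reach this through
 * the nlmsg_put() wrapper, which additionally checks tailroom:
 *
 *	nlh = nlmsg_put(skb, portid, seq, RTM_NEWLINK,
 *			sizeof(struct ifinfomsg), NLM_F_MULTI);
 *	if (!nlh)
 *		return -EMSGSIZE;
 */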
2621
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622/*
2623 * It looks a bit ugly.
2624 * It would be better to create a kernel thread.
2625 */
2626
2627static int netlink_dump(struct sock *sk)
2628{
2629 struct netlink_sock *nlk = nlk_sk(sk);
2630 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002631 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 struct nlmsghdr *nlh;
Herbert Xu57b26932016-05-16 17:28:16 +08002633 struct module *module;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002634 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002635 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002637 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002638 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002639 err = -EINVAL;
2640 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 }
2642
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002643 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002644 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2645
Patrick McHardyf9c22882013-04-17 06:47:04 +00002646 if (!netlink_rx_is_mmaped(sk) &&
2647 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2648 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002649
2650	/* NLMSG_GOODSIZE is small to avoid high-order allocations being
2651	 * required, but it makes sense to _attempt_ a 16K allocation
2652	 * to reduce the number of system calls on dump operations, if the
2653	 * user ever provided a big enough buffer.
2654 */
2655 if (alloc_size < nlk->max_recvmsg_len) {
2656 skb = netlink_alloc_skb(sk,
2657 nlk->max_recvmsg_len,
2658 nlk->portid,
2659 GFP_KERNEL |
2660 __GFP_NOWARN |
2661 __GFP_NORETRY);
2662		/* available room should be the exact amount to avoid MSG_TRUNC */
2663 if (skb)
2664 skb_reserve(skb, skb_tailroom(skb) -
2665 nlk->max_recvmsg_len);
2666 }
2667 if (!skb)
2668 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2669 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002670 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002671 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002672 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002673
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 len = cb->dump(skb, cb);
2675
2676 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002677 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002678
2679 if (sk_filter(sk, skb))
2680 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002681 else
2682 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683 return 0;
2684 }
2685
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002686 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2687 if (!nlh)
2688 goto errout_skb;
2689
Johannes Berg670dc282011-06-20 13:40:46 +02002690 nl_dump_check_consistent(cb, nlh);
2691
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002692 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2693
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002694 if (sk_filter(sk, skb))
2695 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002696 else
2697 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
Thomas Grafa8f74b22005-11-10 02:25:52 +01002699 if (cb->done)
2700 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002702 nlk->cb_running = false;
Herbert Xu57b26932016-05-16 17:28:16 +08002703 module = cb->module;
2704 skb = cb->skb;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002705 mutex_unlock(nlk->cb_mutex);
Herbert Xu57b26932016-05-16 17:28:16 +08002706 module_put(module);
2707 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002709
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002710errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002711 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002712 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002713 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714}
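/*
 * For context: each skb built by netlink_dump() is handed to user space
 * one recvmsg() at a time; netlink_recvmsg() calls back in here to
 * refill the queue until the NLMSG_DONE message appended above is seen.
 * A sketch of the matching user-space read loop (assuming fd is an
 * AF_NETLINK socket on which a dump request was just sent):
 *
 *	char buf[16384];
 *	int done = 0;
 *	while (!done) {
 *		int len = recv(fd, buf, sizeof(buf), 0);
 *		struct nlmsghdr *nh;
 *		if (len <= 0)
 *			break;
 *		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
 *		     nh = NLMSG_NEXT(nh, len)) {
 *			if (nh->nlmsg_type == NLMSG_DONE) {
 *				done = 1;
 *				break;
 *			}
 *			(handle one NLM_F_MULTI part here)
 *		}
 *	}
 */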
2715
Gao feng6dc878a2012-10-04 20:15:48 +00002716int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2717 const struct nlmsghdr *nlh,
2718 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719{
2720 struct netlink_callback *cb;
2721 struct sock *sk;
2722 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002723 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724
Patrick McHardyf9c22882013-04-17 06:47:04 +00002725 /* Memory mapped dump requests need to be copied to avoid looping
2726	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2727	 * a reference to the skb.
2728 */
2729 if (netlink_skb_is_mmaped(skb)) {
2730 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002731 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002732 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002733 } else
2734 atomic_inc(&skb->users);
2735
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002736 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2737 if (sk == NULL) {
2738 ret = -ECONNREFUSED;
2739 goto error_free;
2740 }
2741
2742 nlk = nlk_sk(sk);
2743 mutex_lock(nlk->cb_mutex);
2744 /* A dump is in progress... */
2745 if (nlk->cb_running) {
2746 ret = -EBUSY;
2747 goto error_unlock;
2748 }
2749	/* take a reference on the module that cb->dump belongs to */
2750 if (!try_module_get(control->module)) {
2751 ret = -EPROTONOSUPPORT;
2752 goto error_unlock;
2753 }
2754
2755 cb = &nlk->cb;
2756 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002757 cb->dump = control->dump;
2758 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002760 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002761 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002762 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 cb->skb = skb;
2764
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002765 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002766
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002767 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768
Andrey Vaginb44d2112011-02-21 02:40:47 +00002769 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002771
Andrey Vaginb44d2112011-02-21 02:40:47 +00002772 if (ret)
2773 return ret;
2774
Denis V. Lunev5c582982007-10-23 20:29:25 -07002775	/* We successfully started a dump; by returning -EINTR we
2776	 * signal that no ACK should be sent even if one was requested.
2777 */
2778 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002779
2780error_unlock:
2781 sock_put(sk);
2782 mutex_unlock(nlk->cb_mutex);
2783error_free:
2784 kfree_skb(skb);
2785 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786}
Gao feng6dc878a2012-10-04 20:15:48 +00002787EXPORT_SYMBOL(__netlink_dump_start);
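/*
 * Typical use: a message handler that sees NLM_F_DUMP fills in a
 * struct netlink_dump_control and calls the netlink_dump_start()
 * inline wrapper, which supplies THIS_MODULE and lands here.  A
 * minimal sketch, where ssk, my_dump() and my_done() are hypothetical
 * names standing in for the subsystem's kernel socket and callbacks:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *			.done = my_done,
 *		};
 *		return netlink_dump_start(ssk, skb, nlh, &c);
 *	}
 */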
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
2789void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2790{
2791 struct sk_buff *skb;
2792 struct nlmsghdr *rep;
2793 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002794 size_t payload = sizeof(*errmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795
Thomas Graf339bf982006-11-10 14:10:15 -08002796	/* error messages get the original request appended */
2797 if (err)
2798 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
Patrick McHardyf9c22882013-04-17 06:47:04 +00002800 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2801 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 if (!skb) {
2803 struct sock *sk;
2804
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002805 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002806 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002807 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 if (sk) {
2809 sk->sk_err = ENOBUFS;
2810 sk->sk_error_report(sk);
2811 sock_put(sk);
2812 }
2813 return;
2814 }
2815
Eric W. Biederman15e47302012-09-07 20:12:54 +00002816 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002817 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002818 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 errmsg->error = err;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002820 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002821 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002823EXPORT_SYMBOL(netlink_ack);
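/*
 * The ACK built above is an NLMSG_ERROR message whose payload begins
 * with struct nlmsgerr (from the netlink uapi header):
 *
 *	struct nlmsgerr {
 *		int		error;	-- 0 for a plain ACK, else -errno
 *		struct nlmsghdr	msg;	-- header of the request being
 *					   answered; the full request is
 *					   appended when error != 0
 *	};
 */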
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002825int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002826 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002827{
Thomas Graf82ace472005-11-10 02:25:53 +01002828 struct nlmsghdr *nlh;
2829 int err;
2830
2831 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002832 int msglen;
2833
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002834 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002835 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002836
Martin Murrayad8e4b72006-01-10 13:02:29 -08002837 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002838 return 0;
2839
Thomas Grafd35b6852007-03-22 23:28:46 -07002840 /* Only requests are handled by the kernel */
2841 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002842 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002843
Thomas Graf45e7ae72007-03-22 23:29:10 -07002844 /* Skip control messages */
2845 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002846 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002847
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002848 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002849 if (err == -EINTR)
2850 goto skip;
2851
2852ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002853 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002854 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002855
Denis V. Lunev5c582982007-10-23 20:29:25 -07002856skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002857 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002858 if (msglen > skb->len)
2859 msglen = skb->len;
2860 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002861 }
2862
2863 return 0;
2864}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002865EXPORT_SYMBOL(netlink_rcv_skb);
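/*
 * netlink_rcv_skb() is the usual backbone of a kernel socket's input
 * callback.  A minimal sketch, with my_rcv_msg() and my_netlink_rcv()
 * as hypothetical names:
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		(dispatch on nlh->nlmsg_type, return 0 or -errno)
 *	}
 *
 *	static void my_netlink_rcv(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 *
 * my_netlink_rcv would be wired up through the .input field of
 * struct netlink_kernel_cfg when the kernel socket is created.
 */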
Thomas Graf82ace472005-11-10 02:25:53 +01002866
2867/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002868 * nlmsg_notify - send a notification netlink message
2869 * @sk: netlink socket to use
2870 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002871 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002872 * @group: destination multicast group or 0
2873 * @report: 1 to report back, 0 to disable
2874 * @flags: allocation flags
2875 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002876int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002877 unsigned int group, int report, gfp_t flags)
2878{
2879 int err = 0;
2880
2881 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002882 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002883
2884 if (report) {
2885 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002886 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002887 }
2888
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002889		/* errors are reported via the destination sk->sk_err, but delivery
2890		 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002891 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002892 }
2893
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002894 if (report) {
2895 int err2;
2896
Eric W. Biederman15e47302012-09-07 20:12:54 +00002897 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002898 if (!err || err == -ESRCH)
2899 err = err2;
2900 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07002901
2902 return err;
2903}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002904EXPORT_SYMBOL(nlmsg_notify);
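/*
 * A typical call, assuming nlh is the request that triggered the
 * notification: broadcast to the subsystem's group and honour an
 * NLM_F_ECHO report in one step (MY_GROUP is a hypothetical name):
 *
 *	err = nlmsg_notify(sk, skb, portid, MY_GROUP,
 *			   nlmsg_report(nlh), GFP_KERNEL);
 */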
Thomas Grafd387f6a2006-08-15 00:31:06 -07002905
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906#ifdef CONFIG_PROC_FS
2907struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08002908 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 int link;
2910 int hash_idx;
2911};
2912
2913static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2914{
2915 struct nl_seq_iter *iter = seq->private;
2916 int i, j;
Thomas Grafe3416942014-08-02 11:47:45 +02002917 struct netlink_sock *nlk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 struct sock *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 loff_t off = 0;
2920
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002921 for (i = 0; i < MAX_LINKS; i++) {
Thomas Grafe3416942014-08-02 11:47:45 +02002922 struct rhashtable *ht = &nl_table[i].hash;
Eric Dumazet67a24ac2014-08-05 07:50:07 +02002923 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
Thomas Grafe3416942014-08-02 11:47:45 +02002925 for (j = 0; j < tbl->size; j++) {
2926 rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
2927 s = (struct sock *)nlk;
2928
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002929 if (sock_net(s) != seq_file_net(seq))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002930 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 if (off == pos) {
2932 iter->link = i;
2933 iter->hash_idx = j;
2934 return s;
2935 }
2936 ++off;
2937 }
2938 }
2939 }
2940 return NULL;
2941}
2942
2943static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
Thomas Graf78fd1d02014-10-21 22:05:38 +02002944 __acquires(nl_table_lock) __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945{
Thomas Graf78fd1d02014-10-21 22:05:38 +02002946 read_lock(&nl_table_lock);
Thomas Grafe3416942014-08-02 11:47:45 +02002947 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2949}
2950
2951static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2952{
Thomas Graf78fd1d02014-10-21 22:05:38 +02002953 struct rhashtable *ht;
Thomas Grafe3416942014-08-02 11:47:45 +02002954 struct netlink_sock *nlk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 struct nl_seq_iter *iter;
Gao fengda12c902013-06-06 14:49:11 +08002956 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 int i, j;
2958
2959 ++*pos;
2960
2961 if (v == SEQ_START_TOKEN)
2962 return netlink_seq_socket_idx(seq, 0);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002963
Gao fengda12c902013-06-06 14:49:11 +08002964 net = seq_file_net(seq);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002965 iter = seq->private;
Thomas Grafe3416942014-08-02 11:47:45 +02002966 nlk = v;
2967
Thomas Graf78fd1d02014-10-21 22:05:38 +02002968 i = iter->link;
2969 ht = &nl_table[i].hash;
2970 rht_for_each_entry(nlk, nlk->node.next, ht, node)
Thomas Grafe3416942014-08-02 11:47:45 +02002971 if (net_eq(sock_net((struct sock *)nlk), net))
2972 return nlk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 j = iter->hash_idx + 1;
2975
2976 do {
Eric Dumazet67a24ac2014-08-05 07:50:07 +02002977 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
Thomas Grafe3416942014-08-02 11:47:45 +02002979 for (; j < tbl->size; j++) {
Thomas Graf78fd1d02014-10-21 22:05:38 +02002980 rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
Thomas Grafe3416942014-08-02 11:47:45 +02002981 if (net_eq(sock_net((struct sock *)nlk), net)) {
2982 iter->link = i;
2983 iter->hash_idx = j;
2984 return nlk;
2985 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 }
2987 }
2988
2989 j = 0;
2990 } while (++i < MAX_LINKS);
2991
2992 return NULL;
2993}
2994
2995static void netlink_seq_stop(struct seq_file *seq, void *v)
Thomas Graf78fd1d02014-10-21 22:05:38 +02002996 __releases(RCU) __releases(nl_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997{
Thomas Grafe3416942014-08-02 11:47:45 +02002998 rcu_read_unlock();
Thomas Graf78fd1d02014-10-21 22:05:38 +02002999 read_unlock(&nl_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000}
3001
3002
3003static int netlink_seq_show(struct seq_file *seq, void *v)
3004{
Eric Dumazet658cb352012-04-22 21:30:21 +00003005 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 seq_puts(seq,
3007 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003008 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00003009 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 struct sock *s = v;
3011 struct netlink_sock *nlk = nlk_sk(s);
3012
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003013 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014 s,
3015 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003016 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07003017 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07003018 sk_rmem_alloc_get(s),
3019 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003020 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07003021 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003022 atomic_read(&s->sk_drops),
3023 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 );
3025
3026 }
3027 return 0;
3028}
3029
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003030static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 .start = netlink_seq_start,
3032 .next = netlink_seq_next,
3033 .stop = netlink_seq_stop,
3034 .show = netlink_seq_show,
3035};
3036
3037
3038static int netlink_seq_open(struct inode *inode, struct file *file)
3039{
Denis V. Luneve372c412007-11-19 22:31:54 -08003040 return seq_open_net(inode, file, &netlink_seq_ops,
3041 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003042}
3043
Arjan van de Venda7071d2007-02-12 00:55:36 -08003044static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 .owner = THIS_MODULE,
3046 .open = netlink_seq_open,
3047 .read = seq_read,
3048 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003049 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050};
3051
3052#endif
3053
3054int netlink_register_notifier(struct notifier_block *nb)
3055{
Alan Sterne041c682006-03-27 01:16:30 -08003056 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003058EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059
3060int netlink_unregister_notifier(struct notifier_block *nb)
3061{
Alan Sterne041c682006-03-27 01:16:30 -08003062 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003064EXPORT_SYMBOL(netlink_unregister_notifier);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003065
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003066static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067 .family = PF_NETLINK,
3068 .owner = THIS_MODULE,
3069 .release = netlink_release,
3070 .bind = netlink_bind,
3071 .connect = netlink_connect,
3072 .socketpair = sock_no_socketpair,
3073 .accept = sock_no_accept,
3074 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003075 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 .ioctl = sock_no_ioctl,
3077 .listen = sock_no_listen,
3078 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003079 .setsockopt = netlink_setsockopt,
3080 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 .sendmsg = netlink_sendmsg,
3082 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003083 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 .sendpage = sock_no_sendpage,
3085};
3086
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003087static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 .family = PF_NETLINK,
3089 .create = netlink_create,
3090 .owner = THIS_MODULE, /* for consistency 8) */
3091};
3092
Pavel Emelyanov46650792007-10-08 20:38:39 -07003093static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003094{
3095#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003096 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003097 return -ENOMEM;
3098#endif
3099 return 0;
3100}
3101
Pavel Emelyanov46650792007-10-08 20:38:39 -07003102static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003103{
3104#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003105 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003106#endif
3107}
3108
David S. Millerb963ea82010-08-30 19:08:01 -07003109static void __init netlink_add_usersock_entry(void)
3110{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003111 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003112 int groups = 32;
3113
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003114 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003115 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003116 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003117
3118 netlink_table_grab();
3119
3120 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003121 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003122 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3123 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003124 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003125
3126 netlink_table_ungrab();
3127}
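/*
 * The entry registered above backs NETLINK_USERSOCK: user-space peers
 * exchanging messages over netlink with no kernel-side protocol.
 * NL_CFG_F_NONROOT_SEND lets unprivileged processes send to its
 * multicast groups; receiving on a group still needs CAP_NET_ADMIN.
 * A user-space sketch of opening such a socket:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
 *	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */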
3128
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003129static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003130 .init = netlink_net_init,
3131 .exit = netlink_net_exit,
3132};
3133
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134static int __init netlink_proto_init(void)
3135{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 int err = proto_register(&netlink_proto, 0);
Thomas Grafe3416942014-08-02 11:47:45 +02003138 struct rhashtable_params ht_params = {
3139 .head_offset = offsetof(struct netlink_sock, node),
3140 .key_offset = offsetof(struct netlink_sock, portid),
3141 .key_len = sizeof(u32), /* portid */
Daniel Borkmann4b1c83d2014-12-10 16:33:10 +01003142 .hashfn = jhash,
Thomas Grafe3416942014-08-02 11:47:45 +02003143 .max_shift = 16, /* 64K */
3144 .grow_decision = rht_grow_above_75,
3145 .shrink_decision = rht_shrink_below_30,
3146 .mutex_is_held = lockdep_nl_sk_hash_is_held,
3147 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148
3149 if (err != 0)
3150 goto out;
3151
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003152 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003154 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003155 if (!nl_table)
3156 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158 for (i = 0; i < MAX_LINKS; i++) {
Thomas Grafe3416942014-08-02 11:47:45 +02003159 if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
3160			while (--i >= 0)
3161 rhashtable_destroy(&nl_table[i].hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003163 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 }
3166
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003167 INIT_LIST_HEAD(&netlink_tap_all);
3168
David S. Millerb963ea82010-08-30 19:08:01 -07003169 netlink_add_usersock_entry();
3170
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003172 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003173 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 rtnetlink_init();
3175out:
3176 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003177panic:
3178 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179}
3180
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181core_initcall(netlink_proto_init);