/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				 - inc module use count of module that owns
 * 				   the kernel socket in case userspace opens
 * 				   socket of same protocol
 * 				 - remove all module support, since netlink is
 * 				   mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found && nt->module)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

176
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200177static bool netlink_filter_tap(const struct sk_buff *skb)
178{
179 struct sock *sk = skb->sk;
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200180
181 /* We take the more conservative approach and
182 * whitelist socket protocols that may pass.
183 */
184 switch (sk->sk_protocol) {
185 case NETLINK_ROUTE:
186 case NETLINK_USERSOCK:
187 case NETLINK_SOCK_DIAG:
188 case NETLINK_NFLOG:
189 case NETLINK_XFRM:
190 case NETLINK_FIB_LOOKUP:
191 case NETLINK_NETFILTER:
192 case NETLINK_GENERIC:
Varka Bhadram498044b2014-07-16 10:59:47 +0530193 return true;
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200194 }
195
Varka Bhadram498044b2014-07-16 10:59:47 +0530196 return false;
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200197}
198
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200199static int __netlink_deliver_tap_skb(struct sk_buff *skb,
200 struct net_device *dev)
201{
202 struct sk_buff *nskb;
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200203 struct sock *sk = skb->sk;
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200204 int ret = -ENOMEM;
205
206 dev_hold(dev);
207 nskb = skb_clone(skb, GFP_ATOMIC);
208 if (nskb) {
209 nskb->dev = dev;
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200210 nskb->protocol = htons((u16) sk->sk_protocol);
Daniel Borkmann604d13c2013-12-23 14:35:56 +0100211 nskb->pkt_type = netlink_is_kernel(sk) ?
212 PACKET_KERNEL : PACKET_USER;
Daniel Borkmann4e48ed82014-08-07 22:22:47 +0200213 skb_reset_network_header(nskb);
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200214 ret = dev_queue_xmit(nskb);
215 if (unlikely(ret > 0))
216 ret = net_xmit_errno(ret);
217 }
218
219 dev_put(dev);
220 return ret;
221}
222
223static void __netlink_deliver_tap(struct sk_buff *skb)
224{
225 int ret;
226 struct netlink_tap *tmp;
227
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200228 if (!netlink_filter_tap(skb))
229 return;
230
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200231 list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
232 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
233 if (unlikely(ret))
234 break;
235 }
236}
237
238static void netlink_deliver_tap(struct sk_buff *skb)
239{
240 rcu_read_lock();
241
242 if (unlikely(!list_empty(&netlink_tap_all)))
243 __netlink_deliver_tap(skb);
244
245 rcu_read_unlock();
246}
247
Daniel Borkmann73bfd372013-12-23 14:35:55 +0100248static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
249 struct sk_buff *skb)
250{
251 if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
252 netlink_deliver_tap(skb);
253}
254
Patrick McHardycd1df522013-04-17 06:47:05 +0000255static void netlink_overrun(struct sock *sk)
256{
257 struct netlink_sock *nlk = nlk_sk(sk);
258
259 if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
260 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
261 sk->sk_err = ENOBUFS;
262 sk->sk_error_report(sk);
263 }
264 }
265 atomic_inc(&sk->sk_drops);
266}
267
268static void netlink_rcv_wake(struct sock *sk)
269{
270 struct netlink_sock *nlk = nlk_sk(sk);
271
272 if (skb_queue_empty(&sk->sk_receive_queue))
273 clear_bit(NETLINK_CONGESTED, &nlk->state);
274 if (!test_bit(NETLINK_CONGESTED, &nlk->state))
275 wake_up_interruptible(&nlk->wait);
276}
277
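/* The CONFIG_NETLINK_MMAP block below implements memory mapped netlink I/O:
 * RX/TX rings of fixed-size frames are shared with userspace via mmap(), and
 * these helpers manage the page vectors backing the rings and the per-frame
 * status words used for synchronization.
 */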
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max = req->nm_frame_nr - 1;
	ring->head = 0;
	ring->frame_size = req->nm_frame_size;
	ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

610{
611 unsigned int head = ring->head, pos = head;
612 const struct nl_mmap_hdr *hdr;
613
614 do {
615 hdr = __netlink_lookup_frame(ring, pos);
616 if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
617 break;
618 if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
619 break;
620 netlink_increment_head(ring);
621 } while (ring->head != head);
622}
623
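/* A dump may keep filling the RX ring only while the frame half a ring ahead
 * of the current head is still unused, i.e. while at least half the ring
 * remains free.
 */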
static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

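/* Sockets are hashed in a per-protocol rhashtable keyed by (net, portid);
 * netlink_compare_arg packs that key for the lookup and insert helpers below.
 */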
struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		if (err == -EEXIST)
			err = -EADDRINUSE;
		nlk_sk(sk)->portid = 0;
		sock_put(sk);
	}

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

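/* Autobind uses the caller's tgid as the initial portid and falls back to
 * scanning negative portid values on collision; -EBUSY from netlink_insert()
 * means another thread bound the socket first, which is fine.
 */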
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	rcu_read_lock();
	if (__netlink_lookup(table, portid, net)) {
		/* Bind collision, search negative portid values. */
		portid = rover--;
		if (rover > -4097)
			rover = -4097;
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in the
 * user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in the
 * user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in all
 * user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap over
 * the network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

Eric W. Biederman5187cd02014-04-23 14:25:48 -07001408static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001409{
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001410 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
Eric W. Biedermandf008c92012-11-16 03:03:07 +00001411 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001412}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001414static void
1415netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1416{
1417 struct netlink_sock *nlk = nlk_sk(sk);
1418
1419 if (nlk->subscriptions && !subscriptions)
1420 __sk_del_bind_node(sk);
1421 else if (!nlk->subscriptions && subscriptions)
1422 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1423 nlk->subscriptions = subscriptions;
1424}
1425
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001426static int netlink_realloc_groups(struct sock *sk)
Patrick McHardy513c2502005-09-06 15:43:59 -07001427{
1428 struct netlink_sock *nlk = nlk_sk(sk);
1429 unsigned int groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001430 unsigned long *new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001431 int err = 0;
1432
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001433 netlink_table_grab();
1434
Patrick McHardy513c2502005-09-06 15:43:59 -07001435 groups = nl_table[sk->sk_protocol].groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001436 if (!nl_table[sk->sk_protocol].registered) {
Patrick McHardy513c2502005-09-06 15:43:59 -07001437 err = -ENOENT;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001438 goto out_unlock;
1439 }
Patrick McHardy513c2502005-09-06 15:43:59 -07001440
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001441 if (nlk->ngroups >= groups)
1442 goto out_unlock;
Patrick McHardy513c2502005-09-06 15:43:59 -07001443
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001444 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1445 if (new_groups == NULL) {
1446 err = -ENOMEM;
1447 goto out_unlock;
1448 }
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001449 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001450 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1451
1452 nlk->groups = new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001453 nlk->ngroups = groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001454 out_unlock:
1455 netlink_table_ungrab();
1456 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001457}
1458
Johannes Berg02c81ab2014-12-22 18:56:35 +01001459static void netlink_undo_bind(int group, unsigned long groups,
Johannes Berg023e2cf2014-12-23 21:00:06 +01001460 struct sock *sk)
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001461{
Johannes Berg023e2cf2014-12-23 21:00:06 +01001462 struct netlink_sock *nlk = nlk_sk(sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001463 int undo;
1464
1465 if (!nlk->netlink_unbind)
1466 return;
1467
1468 for (undo = 0; undo < group; undo++)
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09001469 if (test_bit(undo, &groups))
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001470 nlk->netlink_unbind(sock_net(sk), undo + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001471}
1472
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001473static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1474 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475{
1476 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001477 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 struct netlink_sock *nlk = nlk_sk(sk);
1479 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1480 int err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001481	unsigned long groups = nladdr->nl_groups;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001482
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001483 if (addr_len < sizeof(struct sockaddr_nl))
1484 return -EINVAL;
1485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 if (nladdr->nl_family != AF_NETLINK)
1487 return -EINVAL;
1488
1489	/* Only the superuser is allowed to listen to multicasts */
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001490 if (groups) {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001491 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001492 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001493 err = netlink_realloc_groups(sk);
1494 if (err)
1495 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001496 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001498 if (nlk->portid)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001499 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 return -EINVAL;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001501
1502 if (nlk->netlink_bind && groups) {
1503 int group;
1504
1505 for (group = 0; group < nlk->ngroups; group++) {
1506 if (!test_bit(group, &groups))
1507 continue;
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001508 err = nlk->netlink_bind(net, group + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001509 if (!err)
1510 continue;
Johannes Berg023e2cf2014-12-23 21:00:06 +01001511 netlink_undo_bind(group, groups, sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001512 return err;
1513 }
1514 }
1515
1516 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 err = nladdr->nl_pid ?
Herbert Xu8ea65f42015-01-26 14:02:56 +11001518 netlink_insert(sk, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 netlink_autobind(sock);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001520 if (err) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01001521 netlink_undo_bind(nlk->ngroups, groups, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 return err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001523 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 }
1525
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001526 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 return 0;
1528
1529 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001530 netlink_update_subscriptions(sk, nlk->subscriptions +
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001531 hweight32(groups) -
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001532 hweight32(nlk->groups[0]));
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001533 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001534 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 netlink_table_ungrab();
1536
1537 return 0;
1538}
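
/*
 * Example (userspace view, illustrative only): a process subscribes to
 * rtnetlink link notifications by setting nl_groups before bind(); the
 * kernel side of that bind() is handled by netlink_bind() above.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_LINK,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */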
1539
1540static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1541 int alen, int flags)
1542{
1543 int err = 0;
1544 struct sock *sk = sock->sk;
1545 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001546 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
Changli Gao6503d962010-03-31 22:58:26 +00001548 if (alen < sizeof(addr->sa_family))
1549 return -EINVAL;
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 if (addr->sa_family == AF_UNSPEC) {
1552 sk->sk_state = NETLINK_UNCONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001553 nlk->dst_portid = 0;
Patrick McHardyd629b832005-08-14 19:27:50 -07001554 nlk->dst_group = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 return 0;
1556 }
1557 if (addr->sa_family != AF_NETLINK)
1558 return -EINVAL;
1559
Mike Pecovnik46833a82014-02-24 21:11:16 +01001560 if ((nladdr->nl_groups || nladdr->nl_pid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001561 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 return -EPERM;
1563
Eric W. Biederman15e47302012-09-07 20:12:54 +00001564 if (!nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 err = netlink_autobind(sock);
1566
1567 if (err == 0) {
1568 sk->sk_state = NETLINK_CONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001569 nlk->dst_portid = nladdr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001570 nlk->dst_group = ffs(nladdr->nl_groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 }
1572
1573 return err;
1574}
1575
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001576static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1577 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578{
1579 struct sock *sk = sock->sk;
1580 struct netlink_sock *nlk = nlk_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001581 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001582
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 nladdr->nl_family = AF_NETLINK;
1584 nladdr->nl_pad = 0;
1585 *addr_len = sizeof(*nladdr);
1586
1587 if (peer) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001588 nladdr->nl_pid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001589 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001591 nladdr->nl_pid = nlk->portid;
Patrick McHardy513c2502005-09-06 15:43:59 -07001592 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 }
1594 return 0;
1595}
1596
Eric W. Biederman15e47302012-09-07 20:12:54 +00001597static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 struct sock *sock;
1600 struct netlink_sock *nlk;
1601
Eric W. Biederman15e47302012-09-07 20:12:54 +00001602 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 if (!sock)
1604 return ERR_PTR(-ECONNREFUSED);
1605
1606 /* Don't bother queuing skb if kernel socket has no input function */
1607 nlk = nlk_sk(sock);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001608 if (sock->sk_state == NETLINK_CONNECTED &&
Eric W. Biederman15e47302012-09-07 20:12:54 +00001609 nlk->dst_portid != nlk_sk(ssk)->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 sock_put(sock);
1611 return ERR_PTR(-ECONNREFUSED);
1612 }
1613 return sock;
1614}
1615
1616struct sock *netlink_getsockbyfilp(struct file *filp)
1617{
Al Viro496ad9a2013-01-23 17:07:38 -05001618 struct inode *inode = file_inode(filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 struct sock *sock;
1620
1621 if (!S_ISSOCK(inode->i_mode))
1622 return ERR_PTR(-ENOTSOCK);
1623
1624 sock = SOCKET_I(inode)->sk;
1625 if (sock->sk_family != AF_NETLINK)
1626 return ERR_PTR(-EINVAL);
1627
1628 sock_hold(sock);
1629 return sock;
1630}
1631
Pablo Neira3a365152013-06-28 03:04:23 +02001632static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1633 int broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001634{
1635 struct sk_buff *skb;
1636 void *data;
1637
Pablo Neira3a365152013-06-28 03:04:23 +02001638 if (size <= NLMSG_GOODSIZE || broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001639 return alloc_skb(size, GFP_KERNEL);
1640
Pablo Neira3a365152013-06-28 03:04:23 +02001641 size = SKB_DATA_ALIGN(size) +
1642 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001643
1644 data = vmalloc(size);
1645 if (data == NULL)
Pablo Neira3a365152013-06-28 03:04:23 +02001646 return NULL;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001647
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001648 skb = __build_skb(data, size);
Pablo Neira3a365152013-06-28 03:04:23 +02001649 if (skb == NULL)
1650 vfree(data);
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001651 else
Pablo Neira3a365152013-06-28 03:04:23 +02001652 skb->destructor = netlink_skb_destructor;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001653
1654 return skb;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001655}
1656
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657/*
1658 * Attach a skb to a netlink socket.
1659 * The caller must hold a reference to the destination socket. On error, the
1660 * reference is dropped. The skb is not sent to the destination; all
1661 * error checks are performed and memory in the queue is reserved.
1662 * Return values:
1663 * < 0: error. skb freed, reference to sock dropped.
1664 * 0: continue
1665 * 1: repeat lookup - reference dropped while waiting for socket memory.
1666 */
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001667int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001668 long *timeo, struct sock *ssk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669{
1670 struct netlink_sock *nlk;
1671
1672 nlk = nlk_sk(sk);
1673
Patrick McHardy5fd96122013-04-17 06:47:03 +00001674 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1675 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1676 !netlink_skb_is_mmaped(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 DECLARE_WAITQUEUE(wait, current);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001678 if (!*timeo) {
Denis V. Lunevaed81562007-10-10 21:14:32 -07001679 if (!ssk || netlink_is_kernel(ssk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 netlink_overrun(sk);
1681 sock_put(sk);
1682 kfree_skb(skb);
1683 return -EAGAIN;
1684 }
1685
1686 __set_current_state(TASK_INTERRUPTIBLE);
1687 add_wait_queue(&nlk->wait, &wait);
1688
1689 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Patrick McHardycd967e02013-04-17 06:46:56 +00001690 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 !sock_flag(sk, SOCK_DEAD))
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001692 *timeo = schedule_timeout(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
1694 __set_current_state(TASK_RUNNING);
1695 remove_wait_queue(&nlk->wait, &wait);
1696 sock_put(sk);
1697
1698 if (signal_pending(current)) {
1699 kfree_skb(skb);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001700 return sock_intr_errno(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 }
1702 return 1;
1703 }
Patrick McHardycf0a0182013-04-17 06:47:00 +00001704 netlink_skb_set_owner_r(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 return 0;
1706}
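
/*
 * Illustrative caller pattern (this is what netlink_unicast() below does):
 * a return value of 1 from netlink_attachskb() means the socket reference
 * was dropped while sleeping and the destination lookup must be repeated.
 *
 *	retry:
 *		sk = netlink_getsockbyportid(ssk, portid);
 *		...
 *		err = netlink_attachskb(sk, skb, &timeo, ssk);
 *		if (err == 1)
 *			goto retry;
 *		if (err)
 *			return err;
 */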
1707
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001708static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 int len = skb->len;
1711
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001712 netlink_deliver_tap(skb);
1713
Patrick McHardyf9c22882013-04-17 06:47:04 +00001714#ifdef CONFIG_NETLINK_MMAP
1715 if (netlink_skb_is_mmaped(skb))
1716 netlink_queue_mmaped_skb(sk, skb);
1717 else if (netlink_rx_is_mmaped(sk))
1718 netlink_ring_set_copied(sk, skb);
1719 else
1720#endif /* CONFIG_NETLINK_MMAP */
1721 skb_queue_tail(&sk->sk_receive_queue, skb);
David S. Miller676d2362014-04-11 16:15:36 -04001722 sk->sk_data_ready(sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001723 return len;
1724}
1725
1726int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1727{
1728 int len = __netlink_sendskb(sk, skb);
1729
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 sock_put(sk);
1731 return len;
1732}
1733
1734void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1735{
1736 kfree_skb(skb);
1737 sock_put(sk);
1738}
1739
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001740static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741{
1742 int delta;
1743
Patrick McHardy1298ca42013-04-17 06:46:59 +00001744 WARN_ON(skb->sk != NULL);
Patrick McHardy5fd96122013-04-17 06:47:03 +00001745 if (netlink_skb_is_mmaped(skb))
1746 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001748 delta = skb->end - skb->tail;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001749 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 return skb;
1751
1752 if (skb_shared(skb)) {
1753 struct sk_buff *nskb = skb_clone(skb, allocation);
1754 if (!nskb)
1755 return skb;
Eric Dumazet8460c002012-04-19 02:24:28 +00001756 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 skb = nskb;
1758 }
1759
1760 if (!pskb_expand_head(skb, 0, -delta, allocation))
1761 skb->truesize -= delta;
1762
1763 return skb;
1764}
1765
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001766static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1767 struct sock *ssk)
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001768{
1769 int ret;
1770 struct netlink_sock *nlk = nlk_sk(sk);
1771
1772 ret = -ECONNREFUSED;
1773 if (nlk->netlink_rcv != NULL) {
1774 ret = skb->len;
Patrick McHardycf0a0182013-04-17 06:47:00 +00001775 netlink_skb_set_owner_r(skb, sk);
Patrick McHardye32123e2013-04-17 06:46:57 +00001776 NETLINK_CB(skb).sk = ssk;
Daniel Borkmann73bfd372013-12-23 14:35:55 +01001777 netlink_deliver_tap_kernel(sk, ssk, skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001778 nlk->netlink_rcv(skb);
Eric Dumazetbfb253c2012-04-22 21:30:29 +00001779 consume_skb(skb);
1780 } else {
1781 kfree_skb(skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001782 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001783 sock_put(sk);
1784 return ret;
1785}
1786
1787int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001788 u32 portid, int nonblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789{
1790 struct sock *sk;
1791 int err;
1792 long timeo;
1793
1794 skb = netlink_trim(skb, gfp_any());
1795
1796 timeo = sock_sndtimeo(ssk, nonblock);
1797retry:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001798 sk = netlink_getsockbyportid(ssk, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 if (IS_ERR(sk)) {
1800 kfree_skb(skb);
1801 return PTR_ERR(sk);
1802 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001803 if (netlink_is_kernel(sk))
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001804 return netlink_unicast_kernel(sk, skb, ssk);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001805
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001806 if (sk_filter(sk, skb)) {
Wang Chen84874602008-07-01 19:55:09 -07001807 err = skb->len;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001808 kfree_skb(skb);
1809 sock_put(sk);
1810 return err;
1811 }
1812
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001813 err = netlink_attachskb(sk, skb, &timeo, ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 if (err == 1)
1815 goto retry;
1816 if (err)
1817 return err;
1818
Denis V. Lunev7ee015e2007-10-10 21:14:03 -07001819 return netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001821EXPORT_SYMBOL(netlink_unicast);
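
/*
 * Example (illustrative only): a kernel receive callback answering the
 * requesting process.  Construction of reply_skb is elided; the sender's
 * portid is taken from the control block of the request.
 *
 *	netlink_unicast(kernel_sk, reply_skb,
 *			NETLINK_CB(request_skb).portid, MSG_DONTWAIT);
 */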
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
Patrick McHardyf9c22882013-04-17 06:47:04 +00001823struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1824 u32 dst_portid, gfp_t gfp_mask)
1825{
1826#ifdef CONFIG_NETLINK_MMAP
1827 struct sock *sk = NULL;
1828 struct sk_buff *skb;
1829 struct netlink_ring *ring;
1830 struct nl_mmap_hdr *hdr;
1831 unsigned int maxlen;
1832
1833 sk = netlink_getsockbyportid(ssk, dst_portid);
1834 if (IS_ERR(sk))
1835 goto out;
1836
1837 ring = &nlk_sk(sk)->rx_ring;
1838 /* fast-path without atomic ops for common case: non-mmaped receiver */
1839 if (ring->pg_vec == NULL)
1840 goto out_put;
1841
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001842 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1843 goto out_put;
1844
Patrick McHardyf9c22882013-04-17 06:47:04 +00001845 skb = alloc_skb_head(gfp_mask);
1846 if (skb == NULL)
1847 goto err1;
1848
1849 spin_lock_bh(&sk->sk_receive_queue.lock);
1850 /* check again under lock */
1851 if (ring->pg_vec == NULL)
1852 goto out_free;
1853
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001854	/* check the frame size again under the lock */
Patrick McHardyf9c22882013-04-17 06:47:04 +00001855 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1856 if (maxlen < size)
1857 goto out_free;
1858
1859 netlink_forward_ring(ring);
1860 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1861 if (hdr == NULL)
1862 goto err2;
1863 netlink_ring_setup_skb(skb, sk, ring, hdr);
1864 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1865 atomic_inc(&ring->pending);
1866 netlink_increment_head(ring);
1867
1868 spin_unlock_bh(&sk->sk_receive_queue.lock);
1869 return skb;
1870
1871err2:
1872 kfree_skb(skb);
1873 spin_unlock_bh(&sk->sk_receive_queue.lock);
Patrick McHardycd1df522013-04-17 06:47:05 +00001874 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +00001875err1:
1876 sock_put(sk);
1877 return NULL;
1878
1879out_free:
1880 kfree_skb(skb);
1881 spin_unlock_bh(&sk->sk_receive_queue.lock);
1882out_put:
1883 sock_put(sk);
1884out:
1885#endif
1886 return alloc_skb(size, gfp_mask);
1887}
1888EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1889
Patrick McHardy4277a082006-03-20 18:52:01 -08001890int netlink_has_listeners(struct sock *sk, unsigned int group)
1891{
1892 int res = 0;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001893 struct listeners *listeners;
Patrick McHardy4277a082006-03-20 18:52:01 -08001894
Denis V. Lunevaed81562007-10-10 21:14:32 -07001895 BUG_ON(!netlink_is_kernel(sk));
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001896
1897 rcu_read_lock();
1898 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1899
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001900 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001901 res = test_bit(group - 1, listeners->masks);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001902
1903 rcu_read_unlock();
1904
Patrick McHardy4277a082006-03-20 18:52:01 -08001905 return res;
1906}
1907EXPORT_SYMBOL_GPL(netlink_has_listeners);
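
/*
 * Example (illustrative only): callers commonly use this check to avoid
 * building an expensive notification when nobody is subscribed.  MY_GROUP
 * and build_notification() are hypothetical.
 *
 *	if (netlink_has_listeners(kernel_sk, MY_GROUP)) {
 *		skb = build_notification();
 *		if (skb)
 *			netlink_broadcast(kernel_sk, skb, 0, MY_GROUP,
 *					  GFP_KERNEL);
 *	}
 */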
1908
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001909static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910{
1911 struct netlink_sock *nlk = nlk_sk(sk);
1912
1913 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
Patrick McHardycd967e02013-04-17 06:46:56 +00001914 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
Patrick McHardycf0a0182013-04-17 06:47:00 +00001915 netlink_skb_set_owner_r(skb, sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001916 __netlink_sendskb(sk, skb);
stephen hemminger2c6458002011-12-22 08:52:03 +00001917 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 }
1919 return -1;
1920}
1921
1922struct netlink_broadcast_data {
1923 struct sock *exclude_sk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001924 struct net *net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001925 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 u32 group;
1927 int failure;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001928 int delivery_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 int congested;
1930 int delivered;
Al Viro7d877f32005-10-21 03:20:43 -04001931 gfp_t allocation;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 struct sk_buff *skb, *skb2;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001933 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1934 void *tx_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935};
1936
Rami Rosen46c95212014-07-01 21:17:35 +03001937static void do_one_broadcast(struct sock *sk,
1938 struct netlink_broadcast_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939{
1940 struct netlink_sock *nlk = nlk_sk(sk);
1941 int val;
1942
1943 if (p->exclude_sk == sk)
Rami Rosen46c95212014-07-01 21:17:35 +03001944 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Eric W. Biederman15e47302012-09-07 20:12:54 +00001946 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001947 !test_bit(p->group - 1, nlk->groups))
Rami Rosen46c95212014-07-01 21:17:35 +03001948 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001950 if (!net_eq(sock_net(sk), p->net))
Rami Rosen46c95212014-07-01 21:17:35 +03001951 return;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 if (p->failure) {
1954 netlink_overrun(sk);
Rami Rosen46c95212014-07-01 21:17:35 +03001955 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 }
1957
1958 sock_hold(sk);
1959 if (p->skb2 == NULL) {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001960 if (skb_shared(p->skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 p->skb2 = skb_clone(p->skb, p->allocation);
1962 } else {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001963 p->skb2 = skb_get(p->skb);
1964 /*
1965 * skb ownership may have been set when
1966 * delivered to a previous socket.
1967 */
1968 skb_orphan(p->skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 }
1970 }
1971 if (p->skb2 == NULL) {
1972 netlink_overrun(sk);
1973 /* Clone failed. Notify ALL listeners. */
1974 p->failure = 1;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001975 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1976 p->delivery_failure = 1;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001977 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1978 kfree_skb(p->skb2);
1979 p->skb2 = NULL;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001980 } else if (sk_filter(sk, p->skb2)) {
1981 kfree_skb(p->skb2);
1982 p->skb2 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1984 netlink_overrun(sk);
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001985 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1986 p->delivery_failure = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 } else {
1988 p->congested |= val;
1989 p->delivered = 1;
1990 p->skb2 = NULL;
1991 }
1992 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993}
1994
Eric W. Biederman15e47302012-09-07 20:12:54 +00001995int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001996 u32 group, gfp_t allocation,
1997 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1998 void *filter_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002000 struct net *net = sock_net(ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 struct netlink_broadcast_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 struct sock *sk;
2003
2004 skb = netlink_trim(skb, allocation);
2005
2006 info.exclude_sk = ssk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002007 info.net = net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002008 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 info.group = group;
2010 info.failure = 0;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002011 info.delivery_failure = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 info.congested = 0;
2013 info.delivered = 0;
2014 info.allocation = allocation;
2015 info.skb = skb;
2016 info.skb2 = NULL;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002017 info.tx_filter = filter;
2018 info.tx_data = filter_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
2020	/* While we sleep in clone, do not allow the socket list to change */
2021
2022 netlink_lock_table();
2023
Sasha Levinb67bfe02013-02-27 17:06:00 -08002024 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 do_one_broadcast(sk, &info);
2026
Neil Horman70d4bf62010-07-20 06:45:56 +00002027 consume_skb(skb);
Tommy S. Christensenaa1c6a6f2005-05-19 13:07:32 -07002028
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 netlink_unlock_table();
2030
Neil Horman70d4bf62010-07-20 06:45:56 +00002031 if (info.delivery_failure) {
2032 kfree_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002033 return -ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002034 }
2035 consume_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002036
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 if (info.delivered) {
2038 if (info.congested && (allocation & __GFP_WAIT))
2039 yield();
2040 return 0;
2041 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 return -ESRCH;
2043}
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002044EXPORT_SYMBOL(netlink_broadcast_filtered);
2045
Eric W. Biederman15e47302012-09-07 20:12:54 +00002046int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002047 u32 group, gfp_t allocation)
2048{
Eric W. Biederman15e47302012-09-07 20:12:54 +00002049 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002050 NULL, NULL);
2051}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002052EXPORT_SYMBOL(netlink_broadcast);
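
/*
 * Example (illustrative only): sending an event to every member of a
 * multicast group.  MY_MSG_TYPE, MY_GROUP and struct my_event are
 * hypothetical, error handling is elided, and passing 0 as the portid
 * means no receiver is excluded.
 *
 *	skb = nlmsg_new(sizeof(struct my_event), GFP_KERNEL);
 *	nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE, sizeof(struct my_event), 0);
 *	memcpy(nlmsg_data(nlh), &event, sizeof(struct my_event));
 *	netlink_broadcast(kernel_sk, skb, 0, MY_GROUP, GFP_KERNEL);
 */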
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
2054struct netlink_set_err_data {
2055 struct sock *exclude_sk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002056 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 u32 group;
2058 int code;
2059};
2060
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00002061static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062{
2063 struct netlink_sock *nlk = nlk_sk(sk);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002064 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
2066 if (sk == p->exclude_sk)
2067 goto out;
2068
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002069 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002070 goto out;
2071
Eric W. Biederman15e47302012-09-07 20:12:54 +00002072 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07002073 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 goto out;
2075
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002076 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2077 ret = 1;
2078 goto out;
2079 }
2080
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 sk->sk_err = p->code;
2082 sk->sk_error_report(sk);
2083out:
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002084 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085}
2086
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002087/**
2088 * netlink_set_err - report error to broadcast listeners
2089 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
Eric W. Biederman15e47302012-09-07 20:12:54 +00002090 * @portid: the PORTID of a process that we want to skip (if any)
Johannes Berg840e93f22013-11-19 10:35:40 +01002091 * @group: the broadcast group that will notice the error
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002092 * @code: error code, must be negative (as usual in kernelspace)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002093 *
2094 * This function returns the number of broadcast listeners that have set the
2095 * NETLINK_RECV_NO_ENOBUFS socket option.
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002096 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002097int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098{
2099 struct netlink_set_err_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 struct sock *sk;
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002101 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103 info.exclude_sk = ssk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002104 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 info.group = group;
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002106 /* sk->sk_err wants a positive error value */
2107 info.code = -code;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109 read_lock(&nl_table_lock);
2110
Sasha Levinb67bfe02013-02-27 17:06:00 -08002111 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002112 ret += do_one_set_err(sk, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114 read_unlock(&nl_table_lock);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002115 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116}
Pablo Neira Ayusodd5b6ce2009-03-23 13:21:06 +01002117EXPORT_SYMBOL(netlink_set_err);
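
/*
 * Example (illustrative only): when a notification cannot be delivered
 * (for instance netlink_broadcast() returned -ENOBUFS), the sender can
 * report the error to every member of the group; 0 as the portid means
 * no listener is skipped.
 *
 *	netlink_set_err(kernel_sk, 0, MY_GROUP, -ENOBUFS);
 */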
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118
Johannes Berg84659eb2007-07-18 15:47:05 -07002119/* must be called with netlink table grabbed */
2120static void netlink_update_socket_mc(struct netlink_sock *nlk,
2121 unsigned int group,
2122 int is_new)
2123{
2124 int old, new = !!is_new, subscriptions;
2125
2126 old = test_bit(group - 1, nlk->groups);
2127 subscriptions = nlk->subscriptions - old + new;
2128 if (new)
2129 __set_bit(group - 1, nlk->groups);
2130 else
2131 __clear_bit(group - 1, nlk->groups);
2132 netlink_update_subscriptions(&nlk->sk, subscriptions);
2133 netlink_update_listeners(&nlk->sk);
2134}
2135
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002136static int netlink_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002137 char __user *optval, unsigned int optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002138{
2139 struct sock *sk = sock->sk;
2140 struct netlink_sock *nlk = nlk_sk(sk);
Johannes Bergeb496532007-07-18 02:07:51 -07002141 unsigned int val = 0;
2142 int err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002143
2144 if (level != SOL_NETLINK)
2145 return -ENOPROTOOPT;
2146
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002147 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2148 optlen >= sizeof(int) &&
Johannes Bergeb496532007-07-18 02:07:51 -07002149 get_user(val, (unsigned int __user *)optval))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002150 return -EFAULT;
2151
2152 switch (optname) {
2153 case NETLINK_PKTINFO:
2154 if (val)
2155 nlk->flags |= NETLINK_RECV_PKTINFO;
2156 else
2157 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2158 err = 0;
2159 break;
2160 case NETLINK_ADD_MEMBERSHIP:
2161 case NETLINK_DROP_MEMBERSHIP: {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002162 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002163 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002164 err = netlink_realloc_groups(sk);
2165 if (err)
2166 return err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002167 if (!val || val - 1 >= nlk->ngroups)
2168 return -EINVAL;
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002169 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01002170 err = nlk->netlink_bind(sock_net(sk), val);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04002171 if (err)
2172 return err;
2173 }
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002174 netlink_table_grab();
Johannes Berg84659eb2007-07-18 15:47:05 -07002175 netlink_update_socket_mc(nlk, val,
2176 optname == NETLINK_ADD_MEMBERSHIP);
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002177 netlink_table_ungrab();
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002178 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
Johannes Berg023e2cf2014-12-23 21:00:06 +01002179 nlk->netlink_unbind(sock_net(sk), val);
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00002180
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002181 err = 0;
2182 break;
2183 }
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002184 case NETLINK_BROADCAST_ERROR:
2185 if (val)
2186 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2187 else
2188 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2189 err = 0;
2190 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002191 case NETLINK_NO_ENOBUFS:
2192 if (val) {
2193 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
Patrick McHardycd967e02013-04-17 06:46:56 +00002194 clear_bit(NETLINK_CONGESTED, &nlk->state);
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002195 wake_up_interruptible(&nlk->wait);
Eric Dumazet658cb352012-04-22 21:30:21 +00002196 } else {
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002197 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002198 }
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002199 err = 0;
2200 break;
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002201#ifdef CONFIG_NETLINK_MMAP
2202 case NETLINK_RX_RING:
2203 case NETLINK_TX_RING: {
2204 struct nl_mmap_req req;
2205
2206		/* Rings might consume more memory than queue limits, so require
2207 * CAP_NET_ADMIN.
2208 */
2209 if (!capable(CAP_NET_ADMIN))
2210 return -EPERM;
2211 if (optlen < sizeof(req))
2212 return -EINVAL;
2213 if (copy_from_user(&req, optval, sizeof(req)))
2214 return -EFAULT;
Florian Westphalb265c302015-07-21 16:33:50 +02002215 err = netlink_set_ring(sk, &req,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002216 optname == NETLINK_TX_RING);
2217 break;
2218 }
2219#endif /* CONFIG_NETLINK_MMAP */
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002220 default:
2221 err = -ENOPROTOOPT;
2222 }
2223 return err;
2224}
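
/*
 * Example (userspace view, illustrative only): joining and leaving a
 * multicast group through the socket options handled above.  The group
 * number is hypothetical.
 *
 *	unsigned int group = MY_GROUP;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &group, sizeof(group));
 *	...
 *	setsockopt(fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
 *		   &group, sizeof(group));
 */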
2225
2226static int netlink_getsockopt(struct socket *sock, int level, int optname,
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002227 char __user *optval, int __user *optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002228{
2229 struct sock *sk = sock->sk;
2230 struct netlink_sock *nlk = nlk_sk(sk);
2231 int len, val, err;
2232
2233 if (level != SOL_NETLINK)
2234 return -ENOPROTOOPT;
2235
2236 if (get_user(len, optlen))
2237 return -EFAULT;
2238 if (len < 0)
2239 return -EINVAL;
2240
2241 switch (optname) {
2242 case NETLINK_PKTINFO:
2243 if (len < sizeof(int))
2244 return -EINVAL;
2245 len = sizeof(int);
2246 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
Heiko Carstensa27b58f2006-10-30 15:06:12 -08002247 if (put_user(len, optlen) ||
2248 put_user(val, optval))
2249 return -EFAULT;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002250 err = 0;
2251 break;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002252 case NETLINK_BROADCAST_ERROR:
2253 if (len < sizeof(int))
2254 return -EINVAL;
2255 len = sizeof(int);
2256 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2257 if (put_user(len, optlen) ||
2258 put_user(val, optval))
2259 return -EFAULT;
2260 err = 0;
2261 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002262 case NETLINK_NO_ENOBUFS:
2263 if (len < sizeof(int))
2264 return -EINVAL;
2265 len = sizeof(int);
2266 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2267 if (put_user(len, optlen) ||
2268 put_user(val, optval))
2269 return -EFAULT;
2270 err = 0;
2271 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002272 default:
2273 err = -ENOPROTOOPT;
2274 }
2275 return err;
2276}
2277
2278static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2279{
2280 struct nl_pktinfo info;
2281
2282 info.group = NETLINK_CB(skb).dst_group;
2283 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2284}
2285
Ying Xue1b784142015-03-02 15:37:48 +08002286static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 struct sock *sk = sock->sk;
2289 struct netlink_sock *nlk = nlk_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002290 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002291 u32 dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002292 u32 dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 struct sk_buff *skb;
2294 int err;
2295 struct scm_cookie scm;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002296 u32 netlink_skb_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
2298 if (msg->msg_flags&MSG_OOB)
2299 return -EOPNOTSUPP;
2300
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002301 err = scm_send(sock, msg, &scm, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 if (err < 0)
2303 return err;
2304
2305 if (msg->msg_namelen) {
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002306 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 if (addr->nl_family != AF_NETLINK)
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002308 goto out;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002309 dst_portid = addr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002310 dst_group = ffs(addr->nl_groups);
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002311 err = -EPERM;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002312 if ((dst_group || dst_portid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002313 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002314 goto out;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002315 netlink_skb_flags |= NETLINK_SKB_DST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002317 dst_portid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002318 dst_group = nlk->dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 }
2320
Eric W. Biederman15e47302012-09-07 20:12:54 +00002321 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 err = netlink_autobind(sock);
2323 if (err)
2324 goto out;
2325 }
2326
Al Viroa8866ff2014-12-12 23:02:36 -05002327 /* It's a really convoluted way for userland to ask for mmaped
2328 * sendmsg(), but that's what we've got...
2329 */
Patrick McHardy5fd96122013-04-17 06:47:03 +00002330 if (netlink_tx_is_mmaped(sk) &&
Al Viroa8866ff2014-12-12 23:02:36 -05002331 msg->msg_iter.type == ITER_IOVEC &&
2332 msg->msg_iter.nr_segs == 1 &&
Al Viroc0371da2014-11-24 10:42:55 -05002333 msg->msg_iter.iov->iov_base == NULL) {
Patrick McHardy5fd96122013-04-17 06:47:03 +00002334 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002335 &scm);
Patrick McHardy5fd96122013-04-17 06:47:03 +00002336 goto out;
2337 }
2338
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 err = -EMSGSIZE;
2340 if (len > sk->sk_sndbuf - 32)
2341 goto out;
2342 err = -ENOBUFS;
Pablo Neira3a365152013-06-28 03:04:23 +02002343 skb = netlink_alloc_large_skb(len, dst_group);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002344 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 goto out;
2346
Eric W. Biederman15e47302012-09-07 20:12:54 +00002347 NETLINK_CB(skb).portid = nlk->portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002348 NETLINK_CB(skb).dst_group = dst_group;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002349 NETLINK_CB(skb).creds = scm.creds;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002350 NETLINK_CB(skb).flags = netlink_skb_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 err = -EFAULT;
Al Viro6ce8e9c2014-04-06 21:25:44 -04002353 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 kfree_skb(skb);
2355 goto out;
2356 }
2357
2358 err = security_netlink_send(sk, skb);
2359 if (err) {
2360 kfree_skb(skb);
2361 goto out;
2362 }
2363
Patrick McHardyd629b832005-08-14 19:27:50 -07002364 if (dst_group) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002366 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00002368 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
2370out:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002371 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 return err;
2373}
2374
Ying Xue1b784142015-03-02 15:37:48 +08002375static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 int flags)
2377{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 struct scm_cookie scm;
2379 struct sock *sk = sock->sk;
2380 struct netlink_sock *nlk = nlk_sk(sk);
2381 int noblock = flags&MSG_DONTWAIT;
2382 size_t copied;
Johannes Berg68d6ac62010-08-15 21:20:44 +00002383 struct sk_buff *skb, *data_skb;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002384 int err, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
2386 if (flags&MSG_OOB)
2387 return -EOPNOTSUPP;
2388
2389 copied = 0;
2390
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002391 skb = skb_recv_datagram(sk, flags, noblock, &err);
2392 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 goto out;
2394
Johannes Berg68d6ac62010-08-15 21:20:44 +00002395 data_skb = skb;
2396
Johannes Berg1dacc762009-07-01 11:26:02 +00002397#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2398 if (unlikely(skb_shinfo(skb)->frag_list)) {
Johannes Berg1dacc762009-07-01 11:26:02 +00002399 /*
Johannes Berg68d6ac62010-08-15 21:20:44 +00002400		 * If this skb has a frag_list, it means that we
2401 * will have to use the frag_list skb's data for compat tasks
2402 * and the regular skb's data for normal (non-compat) tasks.
Johannes Berg1dacc762009-07-01 11:26:02 +00002403 *
Johannes Berg68d6ac62010-08-15 21:20:44 +00002404 * If we need to send the compat skb, assign it to the
2405 * 'data_skb' variable so that it will be used below for data
2406 * copying. We keep 'skb' for everything else, including
2407 * freeing both later.
Johannes Berg1dacc762009-07-01 11:26:02 +00002408 */
Johannes Berg68d6ac62010-08-15 21:20:44 +00002409 if (flags & MSG_CMSG_COMPAT)
2410 data_skb = skb_shinfo(skb)->frag_list;
Johannes Berg1dacc762009-07-01 11:26:02 +00002411 }
2412#endif
2413
Eric Dumazet9063e212014-03-07 12:02:33 -08002414 /* Record the max length of recvmsg() calls for future allocations */
2415 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2416 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2417 16384);
2418
Johannes Berg68d6ac62010-08-15 21:20:44 +00002419 copied = data_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 if (len < copied) {
2421 msg->msg_flags |= MSG_TRUNC;
2422 copied = len;
2423 }
2424
Johannes Berg68d6ac62010-08-15 21:20:44 +00002425 skb_reset_transport_header(data_skb);
David S. Miller51f3d022014-11-05 16:46:40 -05002426 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
2428 if (msg->msg_name) {
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002429 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 addr->nl_family = AF_NETLINK;
2431 addr->nl_pad = 0;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002432 addr->nl_pid = NETLINK_CB(skb).portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002433 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 msg->msg_namelen = sizeof(*addr);
2435 }
2436
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002437 if (nlk->flags & NETLINK_RECV_PKTINFO)
2438 netlink_cmsg_recv_pktinfo(msg, skb);
2439
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002440 memset(&scm, 0, sizeof(scm));
2441 scm.creds = *NETLINK_CREDS(skb);
Patrick McHardy188ccb52007-05-03 03:27:01 -07002442 if (flags & MSG_TRUNC)
Johannes Berg68d6ac62010-08-15 21:20:44 +00002443 copied = data_skb->len;
David S. Millerdaa37662010-08-15 23:21:50 -07002444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 skb_free_datagram(sk, skb);
2446
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002447 if (nlk->cb_running &&
2448 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
Andrey Vaginb44d2112011-02-21 02:40:47 +00002449 ret = netlink_dump(sk);
2450 if (ret) {
Ben Pfaffac30ef82014-07-09 10:31:22 -07002451 sk->sk_err = -ret;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002452 sk->sk_error_report(sk);
2453 }
2454 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002456 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457out:
2458 netlink_rcv_wake(sk);
2459 return err ? : copied;
2460}
2461
David S. Miller676d2362014-04-11 16:15:36 -04002462static void netlink_data_ready(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463{
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002464 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465}
2466
2467/*
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002468 * We export these functions to other modules. They provide a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 * complete set of kernel non-blocking support for message
2470 * queueing.
2471 */
2472
2473struct sock *
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002474__netlink_kernel_create(struct net *net, int unit, struct module *module,
2475 struct netlink_kernel_cfg *cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476{
2477 struct socket *sock;
2478 struct sock *sk;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002479 struct netlink_sock *nlk;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002480 struct listeners *listeners = NULL;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002481 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2482 unsigned int groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Akinobu Mitafab2caf2006-08-29 02:15:24 -07002484 BUG_ON(!nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002486 if (unit < 0 || unit >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 return NULL;
2488
2489 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2490 return NULL;
2491
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002492 /*
2493	 * We just need to hold a reference on the net from sk, but must not
2494	 * get_net() it. Besides, we cannot get and then put the net here.
2495	 * So we create the socket inside init_net and then move it to net.
2496 */
2497
2498 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2499 goto out_sock_release_nosk;
2500
2501 sk = sock->sk;
Denis V. Lunevedf02082008-02-29 11:18:32 -08002502 sk_change_net(sk, net);
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002503
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002504 if (!cfg || cfg->groups < 32)
Patrick McHardy4277a082006-03-20 18:52:01 -08002505 groups = 32;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002506 else
2507 groups = cfg->groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08002508
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002509 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
Patrick McHardy4277a082006-03-20 18:52:01 -08002510 if (!listeners)
2511 goto out_sock_release;
2512
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 sk->sk_data_ready = netlink_data_ready;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002514 if (cfg && cfg->input)
2515 nlk_sk(sk)->netlink_rcv = cfg->input;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
Herbert Xu8ea65f42015-01-26 14:02:56 +11002517 if (netlink_insert(sk, 0))
Patrick McHardy77247bb2005-08-14 19:27:13 -07002518 goto out_sock_release;
2519
2520 nlk = nlk_sk(sk);
2521 nlk->flags |= NETLINK_KERNEL_SOCKET;
2522
2523 netlink_table_grab();
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002524 if (!nl_table[unit].registered) {
2525 nl_table[unit].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002526 rcu_assign_pointer(nl_table[unit].listeners, listeners);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002527 nl_table[unit].cb_mutex = cb_mutex;
2528 nl_table[unit].module = module;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002529 if (cfg) {
2530 nl_table[unit].bind = cfg->bind;
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09002531 nl_table[unit].unbind = cfg->unbind;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002532 nl_table[unit].flags = cfg->flags;
Gao fengda12c902013-06-06 14:49:11 +08002533 if (cfg->compare)
2534 nl_table[unit].compare = cfg->compare;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002535 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002536 nl_table[unit].registered = 1;
Jesper Juhlf937f1f462007-10-15 01:39:12 -07002537 } else {
2538 kfree(listeners);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08002539 nl_table[unit].registered++;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002540 }
Patrick McHardy77247bb2005-08-14 19:27:13 -07002541 netlink_table_ungrab();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002542 return sk;
2543
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002544out_sock_release:
Patrick McHardy4277a082006-03-20 18:52:01 -08002545 kfree(listeners);
Denis V. Lunev9dfbec12008-02-29 11:17:56 -08002546 netlink_kernel_release(sk);
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002547 return NULL;
2548
2549out_sock_release_nosk:
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002550 sock_release(sock);
Patrick McHardy77247bb2005-08-14 19:27:13 -07002551 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552}
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002553EXPORT_SYMBOL(__netlink_kernel_create);
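
/*
 * Example (illustrative only): subsystems normally go through the
 * netlink_kernel_create() wrapper in <linux/netlink.h>, which passes
 * THIS_MODULE to __netlink_kernel_create().  The input callback is
 * hypothetical.
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.input	= my_netlink_rcv,
 *	};
 *
 *	sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
 *	if (!sk)
 *		return -ENOMEM;
 */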
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002554
2555void
2556netlink_kernel_release(struct sock *sk)
2557{
Denis V. Lunevedf02082008-02-29 11:18:32 -08002558 sk_release_kernel(sk);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002559}
2560EXPORT_SYMBOL(netlink_kernel_release);
2561
Johannes Bergd136f1b2009-09-12 03:03:15 +00002562int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002563{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002564 struct listeners *new, *old;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002565 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002566
2567 if (groups < 32)
2568 groups = 32;
2569
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002570 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002571 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2572 if (!new)
Johannes Bergd136f1b2009-09-12 03:03:15 +00002573 return -ENOMEM;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00002574 old = nl_deref_protected(tbl->listeners);
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002575 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2576 rcu_assign_pointer(tbl->listeners, new);
2577
Lai Jiangshan37b6b932011-03-15 18:01:42 +08002578 kfree_rcu(old, rcu);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002579 }
2580 tbl->groups = groups;
2581
Johannes Bergd136f1b2009-09-12 03:03:15 +00002582 return 0;
2583}
2584
2585/**
2586 * netlink_change_ngroups - change number of multicast groups
2587 *
2588 * This changes the number of multicast groups that are available
2589 * on a certain netlink family. Note that it is not possible to
2590 * change the number of groups to below 32. Also note that it does
2591 * not implicitly call netlink_clear_multicast_users() when the
2592 * number of groups is reduced.
2593 *
2594 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2595 * @groups: The new number of groups.
2596 */
2597int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2598{
2599 int err;
2600
2601 netlink_table_grab();
2602 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002603 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002604
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002605 return err;
2606}
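
/*
 * Example (illustrative only): a family that allocates additional
 * multicast groups at runtime grows the listener bitmaps accordingly
 * (generic netlink, for instance, does this when groups are registered).
 *
 *	err = netlink_change_ngroups(kernel_sk, new_group_count);
 *	if (err)
 *		return err;
 */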
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002607
Johannes Bergb8273572009-09-24 15:44:05 -07002608void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2609{
2610 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002611 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2612
Sasha Levinb67bfe02013-02-27 17:06:00 -08002613 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002614 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2615}
2616
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002617struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002618__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002619{
2620 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002621 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002622
Wang Yufen23b45672014-02-17 16:53:32 +08002623 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002624 nlh->nlmsg_type = type;
2625 nlh->nlmsg_len = size;
2626 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002627 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002628 nlh->nlmsg_seq = seq;
2629 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002630 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002631 return nlh;
2632}
2633EXPORT_SYMBOL(__nlmsg_put);
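
/*
 * Example (illustrative only): __nlmsg_put() is normally reached through
 * the nlmsg_put() helper, e.g. from a dump callback of the kind driven by
 * netlink_dump() below.  MY_MSG_TYPE and struct my_payload are
 * hypothetical.
 *
 *	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 *			MY_MSG_TYPE, sizeof(struct my_payload), NLM_F_MULTI);
 *	if (!nlh)
 *		return -EMSGSIZE;
 */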
2634
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635/*
2636 * It looks a bit ugly.
2637 * It would be better to create a kernel thread.
2638 */
2639
2640static int netlink_dump(struct sock *sk)
2641{
2642 struct netlink_sock *nlk = nlk_sk(sk);
2643 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002644 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002646 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002647 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002649 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002650 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002651 err = -EINVAL;
2652 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 }
2654
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002655 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002656 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2657
Patrick McHardyf9c22882013-04-17 06:47:04 +00002658 if (!netlink_rx_is_mmaped(sk) &&
2659 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2660 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002661
2662 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2663	 * required, but it makes sense to _attempt_ a 16KB allocation
2664	 * to reduce the number of system calls on dump operations, if the
2665	 * user ever provided a big enough buffer.
2666 */
2667 if (alloc_size < nlk->max_recvmsg_len) {
2668 skb = netlink_alloc_skb(sk,
2669 nlk->max_recvmsg_len,
2670 nlk->portid,
2671 GFP_KERNEL |
2672 __GFP_NOWARN |
2673 __GFP_NORETRY);
2674		/* available room should be the exact amount to avoid MSG_TRUNC */
2675 if (skb)
2676 skb_reserve(skb, skb_tailroom(skb) -
2677 nlk->max_recvmsg_len);
2678 }
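	/* Fall back to a NLMSG_GOODSIZE-based allocation if no large buffer
	 * was ever supplied or if the opportunistic attempt above failed
	 * (it is best effort: __GFP_NOWARN | __GFP_NORETRY). */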
2679 if (!skb)
2680 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2681 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002682 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002683 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002684 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002685
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 len = cb->dump(skb, cb);
2687
2688 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002689 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002690
2691 if (sk_filter(sk, skb))
2692 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002693 else
2694 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 return 0;
2696 }
2697
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002698 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2699 if (!nlh)
2700 goto errout_skb;
2701
Johannes Berg670dc282011-06-20 13:40:46 +02002702 nl_dump_check_consistent(cb, nlh);
2703
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002704 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2705
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002706 if (sk_filter(sk, skb))
2707 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002708 else
2709 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
Thomas Grafa8f74b22005-11-10 02:25:52 +01002711 if (cb->done)
2712 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002714 nlk->cb_running = false;
2715 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002716 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002717 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002719
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002720errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002721 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002722 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002723 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724}
2725
Gao feng6dc878a2012-10-04 20:15:48 +00002726int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2727 const struct nlmsghdr *nlh,
2728 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729{
2730 struct netlink_callback *cb;
2731 struct sock *sk;
2732 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002733 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
Patrick McHardyf9c22882013-04-17 06:47:04 +00002735 /* Memory mapped dump requests need to be copied to avoid looping
2736	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2737 * a reference to the skb.
2738 */
2739 if (netlink_skb_is_mmaped(skb)) {
2740 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002741 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002742 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002743 } else
2744 atomic_inc(&skb->users);
2745
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002746 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2747 if (sk == NULL) {
2748 ret = -ECONNREFUSED;
2749 goto error_free;
2750 }
2751
2752 nlk = nlk_sk(sk);
2753 mutex_lock(nlk->cb_mutex);
2754 /* A dump is in progress... */
2755 if (nlk->cb_running) {
2756 ret = -EBUSY;
2757 goto error_unlock;
2758 }
2759	/* take a reference on the module which cb->dump belongs to */
2760 if (!try_module_get(control->module)) {
2761 ret = -EPROTONOSUPPORT;
2762 goto error_unlock;
2763 }
2764
2765 cb = &nlk->cb;
2766 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002767 cb->dump = control->dump;
2768 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002770 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002771 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002772 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 cb->skb = skb;
2774
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002775 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002776
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002777 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778
Andrey Vaginb44d2112011-02-21 02:40:47 +00002779 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002781
Andrey Vaginb44d2112011-02-21 02:40:47 +00002782 if (ret)
2783 return ret;
2784
Denis V. Lunev5c582982007-10-23 20:29:25 -07002785	/* We successfully started a dump; by returning -EINTR we
2786	 * signal not to send an ACK even if one was requested.
2787 */
2788 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002789
2790error_unlock:
2791 sock_put(sk);
2792 mutex_unlock(nlk->cb_mutex);
2793error_free:
2794 kfree_skb(skb);
2795 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796}
Gao feng6dc878a2012-10-04 20:15:48 +00002797EXPORT_SYMBOL(__netlink_dump_start);
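
/*
 * Illustrative sketch, not part of the original file: request handlers
 * normally start a dump through the netlink_dump_start() wrapper in
 * <linux/netlink.h>, which fills in THIS_MODULE and calls
 * __netlink_dump_start().  The handler and callback names below are
 * assumptions made up for this example.
 */
static int __maybe_unused example_dump(struct sk_buff *skb,
				       struct netlink_callback *cb)
{
	/* Fill skb with as many records as fit; return a positive length
	 * to be called again, or 0 once the dump is complete. */
	return 0;
}

static int __maybe_unused example_handle_getdump(struct sock *nlsk,
						 struct sk_buff *skb,
						 struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.dump = example_dump,
	};

	/* On success this returns -EINTR so that netlink_rcv_skb() does
	 * not send an ACK for the request. */
	return netlink_dump_start(nlsk, skb, nlh, &c);
}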
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798
2799void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2800{
2801 struct sk_buff *skb;
2802 struct nlmsghdr *rep;
2803 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002804 size_t payload = sizeof(*errmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805
Thomas Graf339bf982006-11-10 14:10:15 -08002806	/* error messages get the original request appended */
2807 if (err)
2808 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809
Patrick McHardyf9c22882013-04-17 06:47:04 +00002810 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2811 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 if (!skb) {
2813 struct sock *sk;
2814
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002815 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002816 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002817 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 if (sk) {
2819 sk->sk_err = ENOBUFS;
2820 sk->sk_error_report(sk);
2821 sock_put(sk);
2822 }
2823 return;
2824 }
2825
Eric W. Biederman15e47302012-09-07 20:12:54 +00002826 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002827 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002828 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 errmsg->error = err;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002830 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002831 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002833EXPORT_SYMBOL(netlink_ack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002835int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002836 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002837{
Thomas Graf82ace472005-11-10 02:25:53 +01002838 struct nlmsghdr *nlh;
2839 int err;
2840
2841 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002842 int msglen;
2843
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002844 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002845 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002846
Martin Murrayad8e4b72006-01-10 13:02:29 -08002847 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002848 return 0;
2849
Thomas Grafd35b6852007-03-22 23:28:46 -07002850 /* Only requests are handled by the kernel */
2851 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002852 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002853
Thomas Graf45e7ae72007-03-22 23:29:10 -07002854 /* Skip control messages */
2855 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002856 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002857
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002858 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002859 if (err == -EINTR)
2860 goto skip;
2861
2862ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002863 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002864 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002865
Denis V. Lunev5c582982007-10-23 20:29:25 -07002866skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002867 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002868 if (msglen > skb->len)
2869 msglen = skb->len;
2870 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002871 }
2872
2873 return 0;
2874}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002875EXPORT_SYMBOL(netlink_rcv_skb);
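
/*
 * Illustrative sketch, not part of the original file: the input callback
 * of a kernel socket (netlink_kernel_cfg.input) typically drains the
 * queued requests through netlink_rcv_skb() with a per-message handler.
 * The handler name and the request type below are made up for this
 * example.
 */
static int __maybe_unused example_rcv_msg(struct sk_buff *skb,
					  struct nlmsghdr *nlh)
{
	switch (nlh->nlmsg_type) {
	case NLMSG_MIN_TYPE + 1:	/* hypothetical request type */
		return 0;
	default:
		/* netlink_rcv_skb() turns this into an error ACK. */
		return -EINVAL;
	}
}

static void __maybe_unused example_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}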
Thomas Graf82ace472005-11-10 02:25:53 +01002876
2877/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002878 * nlmsg_notify - send a notification netlink message
2879 * @sk: netlink socket to use
2880 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002881 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002882 * @group: destination multicast group or 0
2883 * @report: 1 to report back, 0 to disable
2884 * @flags: allocation flags
2885 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002886int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002887 unsigned int group, int report, gfp_t flags)
2888{
2889 int err = 0;
2890
2891 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002892 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002893
2894 if (report) {
2895 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002896 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002897 }
2898
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002899		/* errors are reported via the destination sk->sk_err, but propagate
2900		 * delivery errors if the NETLINK_BROADCAST_ERROR flag is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002901 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002902 }
2903
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002904 if (report) {
2905 int err2;
2906
Eric W. Biederman15e47302012-09-07 20:12:54 +00002907 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002908 if (!err || err == -ESRCH)
2909 err = err2;
2910 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07002911
2912 return err;
2913}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002914EXPORT_SYMBOL(nlmsg_notify);
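
/*
 * Illustrative sketch, not part of the original file: typical use of
 * nlmsg_notify() after a state change, in the style of the rtnetlink
 * helpers.  The kernel socket, the multicast group number 1 and the
 * "echo" argument are assumptions made up for this example.
 */
static int __maybe_unused example_notify(struct sock *kernel_nlsk,
					 struct sk_buff *skb,
					 u32 requester_portid, int echo)
{
	/* Broadcast to group 1; if the requester set NLM_F_ECHO, also
	 * unicast a copy back to it (report != 0). */
	return nlmsg_notify(kernel_nlsk, skb, requester_portid, 1,
			    echo, GFP_KERNEL);
}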
Thomas Grafd387f6a2006-08-15 00:31:06 -07002915
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916#ifdef CONFIG_PROC_FS
2917struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08002918 struct seq_net_private p;
Herbert Xu56d28b12015-02-04 07:33:24 +11002919 struct rhashtable_iter hti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 int link;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921};
2922
Herbert Xu56d28b12015-02-04 07:33:24 +11002923static int netlink_walk_start(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924{
Herbert Xu56d28b12015-02-04 07:33:24 +11002925 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926
Herbert Xu56d28b12015-02-04 07:33:24 +11002927 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
2928 if (err) {
2929 iter->link = MAX_LINKS;
2930 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 }
Herbert Xu56d28b12015-02-04 07:33:24 +11002932
2933 err = rhashtable_walk_start(&iter->hti);
2934 return err == -EAGAIN ? 0 : err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935}
2936
Herbert Xu56d28b12015-02-04 07:33:24 +11002937static void netlink_walk_stop(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938{
Herbert Xu56d28b12015-02-04 07:33:24 +11002939 rhashtable_walk_stop(&iter->hti);
2940 rhashtable_walk_exit(&iter->hti);
2941}
2942
2943static void *__netlink_seq_next(struct seq_file *seq)
2944{
2945 struct nl_seq_iter *iter = seq->private;
2946 struct netlink_sock *nlk;
2947
2948 do {
2949 for (;;) {
2950 int err;
2951
2952 nlk = rhashtable_walk_next(&iter->hti);
2953
2954 if (IS_ERR(nlk)) {
2955 if (PTR_ERR(nlk) == -EAGAIN)
2956 continue;
2957
2958 return nlk;
2959 }
2960
2961 if (nlk)
2962 break;
2963
2964 netlink_walk_stop(iter);
2965 if (++iter->link >= MAX_LINKS)
2966 return NULL;
2967
2968 err = netlink_walk_start(iter);
2969 if (err)
2970 return ERR_PTR(err);
2971 }
2972 } while (sock_net(&nlk->sk) != seq_file_net(seq));
2973
2974 return nlk;
2975}
2976
2977static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
2978{
2979 struct nl_seq_iter *iter = seq->private;
2980 void *obj = SEQ_START_TOKEN;
2981 loff_t pos;
2982 int err;
2983
2984 iter->link = 0;
2985
2986 err = netlink_walk_start(iter);
2987 if (err)
2988 return ERR_PTR(err);
2989
2990 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
2991 obj = __netlink_seq_next(seq);
2992
2993 return obj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994}
2995
2996static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2997{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 ++*pos;
Herbert Xu56d28b12015-02-04 07:33:24 +11002999 return __netlink_seq_next(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000}
3001
3002static void netlink_seq_stop(struct seq_file *seq, void *v)
3003{
Herbert Xu56d28b12015-02-04 07:33:24 +11003004 struct nl_seq_iter *iter = seq->private;
3005
3006 if (iter->link >= MAX_LINKS)
3007 return;
3008
3009 netlink_walk_stop(iter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010}
3011
3012
3013static int netlink_seq_show(struct seq_file *seq, void *v)
3014{
Eric Dumazet658cb352012-04-22 21:30:21 +00003015 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 seq_puts(seq,
3017 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003018 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00003019 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020 struct sock *s = v;
3021 struct netlink_sock *nlk = nlk_sk(s);
3022
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003023 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 s,
3025 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003026 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07003027 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07003028 sk_rmem_alloc_get(s),
3029 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003030 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07003031 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003032 atomic_read(&s->sk_drops),
3033 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 );
3035
3036 }
3037 return 0;
3038}
3039
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003040static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041 .start = netlink_seq_start,
3042 .next = netlink_seq_next,
3043 .stop = netlink_seq_stop,
3044 .show = netlink_seq_show,
3045};
3046
3047
3048static int netlink_seq_open(struct inode *inode, struct file *file)
3049{
Denis V. Luneve372c412007-11-19 22:31:54 -08003050 return seq_open_net(inode, file, &netlink_seq_ops,
3051 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003052}
3053
Arjan van de Venda7071d2007-02-12 00:55:36 -08003054static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 .owner = THIS_MODULE,
3056 .open = netlink_seq_open,
3057 .read = seq_read,
3058 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003059 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060};
3061
3062#endif
3063
3064int netlink_register_notifier(struct notifier_block *nb)
3065{
Alan Sterne041c682006-03-27 01:16:30 -08003066 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003068EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069
3070int netlink_unregister_notifier(struct notifier_block *nb)
3071{
Alan Sterne041c682006-03-27 01:16:30 -08003072 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003074EXPORT_SYMBOL(netlink_unregister_notifier);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003075
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003076static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003077 .family = PF_NETLINK,
3078 .owner = THIS_MODULE,
3079 .release = netlink_release,
3080 .bind = netlink_bind,
3081 .connect = netlink_connect,
3082 .socketpair = sock_no_socketpair,
3083 .accept = sock_no_accept,
3084 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003085 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 .ioctl = sock_no_ioctl,
3087 .listen = sock_no_listen,
3088 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003089 .setsockopt = netlink_setsockopt,
3090 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 .sendmsg = netlink_sendmsg,
3092 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003093 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 .sendpage = sock_no_sendpage,
3095};
3096
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003097static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098 .family = PF_NETLINK,
3099 .create = netlink_create,
3100 .owner = THIS_MODULE, /* for consistency 8) */
3101};
3102
Pavel Emelyanov46650792007-10-08 20:38:39 -07003103static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003104{
3105#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003106 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003107 return -ENOMEM;
3108#endif
3109 return 0;
3110}
3111
Pavel Emelyanov46650792007-10-08 20:38:39 -07003112static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003113{
3114#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003115 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003116#endif
3117}
3118
David S. Millerb963ea82010-08-30 19:08:01 -07003119static void __init netlink_add_usersock_entry(void)
3120{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003121 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003122 int groups = 32;
3123
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003124 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003125 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003126 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003127
3128 netlink_table_grab();
3129
3130 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003131 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003132 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3133 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003134 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003135
3136 netlink_table_ungrab();
3137}
3138
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003139static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003140 .init = netlink_net_init,
3141 .exit = netlink_net_exit,
3142};
3143
Patrick McHardy49f7b332015-03-25 13:07:45 +00003144static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
Herbert Xuc428ecd2015-03-20 21:57:01 +11003145{
3146 const struct netlink_sock *nlk = data;
3147 struct netlink_compare_arg arg;
3148
3149 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
Herbert Xu11b58ba2015-03-24 00:50:22 +11003150 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
Herbert Xuc428ecd2015-03-20 21:57:01 +11003151}
3152
3153static const struct rhashtable_params netlink_rhashtable_params = {
3154 .head_offset = offsetof(struct netlink_sock, node),
3155 .key_len = netlink_compare_arg_len,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003156 .obj_hashfn = netlink_hash,
3157 .obj_cmpfn = netlink_compare,
Thomas Grafb5e2c152015-03-24 20:42:19 +00003158 .automatic_shrinking = true,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003159};
3160
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161static int __init netlink_proto_init(void)
3162{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 int err = proto_register(&netlink_proto, 0);
3165
3166 if (err != 0)
3167 goto out;
3168
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003169 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003171 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003172 if (!nl_table)
3173 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 for (i = 0; i < MAX_LINKS; i++) {
Herbert Xuc428ecd2015-03-20 21:57:01 +11003176 if (rhashtable_init(&nl_table[i].hash,
3177 &netlink_rhashtable_params) < 0) {
Thomas Grafe3416942014-08-02 11:47:45 +02003178			while (--i >= 0)
3179 rhashtable_destroy(&nl_table[i].hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003181 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 }
3184
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003185 INIT_LIST_HEAD(&netlink_tap_all);
3186
David S. Millerb963ea82010-08-30 19:08:01 -07003187 netlink_add_usersock_entry();
3188
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003190 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003191 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192 rtnetlink_init();
3193out:
3194 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003195panic:
3196 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197}
3198
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199core_initcall(netlink_proto_init);