/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *				 added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *				 - inc module use count of module that owns
 *				   the kernel socket in case userspace opens
 *				   socket of same protocol
 *				 - remove all module support, since netlink is
 *				   mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with a per-bucket lock while using RCU list
 * modification primitives, and may run in parallel to RCU-protected lookups.
 * Destruction of the netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

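/* Copy an skb whose payload may live in an mmaped ring or a vmalloc area
 * into a freshly allocated linear skb, preserving the netlink control block
 * fields that taps rely on (portid, dst_group, creds).
 */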
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb_end_offset(skb);
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	memcpy(skb_put(new, len), skb->data, len);
	return new;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found && nt->module)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);

	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

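/* Mark the socket congested and report ENOBUFS once per congestion episode,
 * unless the user disabled error reporting with NETLINK_NO_ENOBUFS; the drop
 * is counted either way.
 */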
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

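/* Install a ring (or tear one down when pg_vec is NULL): publish the new
 * block vector and ring geometry under the queue lock, purge any queued
 * skbs, then free the old vector. Callers guarantee that the ring is not
 * currently mapped into userspace.
 */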
static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring,
		   void **pg_vec, unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max = req->nm_frame_nr - 1;
	ring->head = 0;
	ring->frame_size = req->nm_frame_size;
	ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

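/* Flush the data cache for every page backing a frame's payload on
 * architectures that need explicit flushes; the first (header) page is
 * flushed separately in netlink_get_status()/netlink_set_status().
 */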
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr,
				       unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

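/* Translate a frame index into the address of its header: frames are packed
 * back to back inside fixed-size blocks, so the index splits into a block
 * number and a byte offset within that block.
 */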
static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

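/* Advance the ring head by one frame, wrapping around to frame 0 after
 * frame_max.
 */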
static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

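/* Advance the head past frames userspace flagged NL_MMAP_STATUS_SKIP,
 * stopping at the first frame in any other state or after one full pass
 * around the ring.
 */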
static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

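/* A dump may continue only while the RX ring has room: both the current
 * frame and the frame half a ring ahead must be unused, i.e. at least half
 * the ring is still free.
 */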
static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->len = 0;

	skb->destructor = netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len = skb->len;
	hdr->nm_group = NETLINK_CB(skb).dst_group;
	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
	hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len = skb->len;
	hdr->nm_group = NETLINK_CB(skb).dst_group;
	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
	hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		sock_put(sk);
		goto err;
	}

	/* We need to ensure that the socket is hashed and visible. */
	smp_wmb();
	nlk_sk(sk)->bound = portid;

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

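/* The final sock_put() is deferred through an RCU callback so that lockless
 * hash lookups racing with netlink_release() never touch a freed socket.
 */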
static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

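/* Autobind: try the thread group id as portid first, then search downwards
 * from -4097 on collisions. A -EBUSY result from netlink_insert() means
 * another thread bound the socket while we raced, which is not an error.
 */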
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	rcu_read_lock();
	if (__netlink_lookup(table, portid, net)) {
		/* Bind collision, search negative portid values. */
		portid = rover--;
		if (rover > -4097)
			rover = -4097;
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in all user
 * namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

1416/**
1417 * netlink_net_capable - Netlink network namespace message capability test
1418 * @skb: socket buffer holding a netlink command from userspace
1419 * @cap: The capability to use
1420 *
1421 * Test to see if the opener of the socket we received the message
1422 * from had when the netlink socket was created and the sender of the
1423 * message has has the capability @cap over the network namespace of
1424 * the socket we received the message from.
1425 */
1426bool netlink_net_capable(const struct sk_buff *skb, int cap)
1427{
1428 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1429}
1430EXPORT_SYMBOL(netlink_net_capable);
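/*
 * Illustrative kernel-side sketch (not part of this file): a message
 * handler would typically guard a privileged command with one of the
 * helpers above; the handler itself and its signature are hypothetical.
 */
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* require CAP_NET_ADMIN in the socket's network namespace */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* ... perform the privileged operation described by nlh ... */
	return 0;
}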
1431
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001432static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001433{
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001434 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
Eric W. Biedermandf008c92012-11-16 03:03:07 +00001435 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001436}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001438static void
1439netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1440{
1441 struct netlink_sock *nlk = nlk_sk(sk);
1442
1443 if (nlk->subscriptions && !subscriptions)
1444 __sk_del_bind_node(sk);
1445 else if (!nlk->subscriptions && subscriptions)
1446 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1447 nlk->subscriptions = subscriptions;
1448}
1449
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001450static int netlink_realloc_groups(struct sock *sk)
Patrick McHardy513c2502005-09-06 15:43:59 -07001451{
1452 struct netlink_sock *nlk = nlk_sk(sk);
1453 unsigned int groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001454 unsigned long *new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001455 int err = 0;
1456
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001457 netlink_table_grab();
1458
Patrick McHardy513c2502005-09-06 15:43:59 -07001459 groups = nl_table[sk->sk_protocol].groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001460 if (!nl_table[sk->sk_protocol].registered) {
Patrick McHardy513c2502005-09-06 15:43:59 -07001461 err = -ENOENT;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001462 goto out_unlock;
1463 }
Patrick McHardy513c2502005-09-06 15:43:59 -07001464
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001465 if (nlk->ngroups >= groups)
1466 goto out_unlock;
Patrick McHardy513c2502005-09-06 15:43:59 -07001467
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001468 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1469 if (new_groups == NULL) {
1470 err = -ENOMEM;
1471 goto out_unlock;
1472 }
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001473 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001474 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1475
1476 nlk->groups = new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001477 nlk->ngroups = groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001478 out_unlock:
1479 netlink_table_ungrab();
1480 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001481}
1482
Johannes Berg02c81ab2014-12-22 18:56:35 +01001483static void netlink_undo_bind(int group, unsigned long groups,
Johannes Berg023e2cf2014-12-23 21:00:06 +01001484 struct sock *sk)
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001485{
Johannes Berg023e2cf2014-12-23 21:00:06 +01001486 struct netlink_sock *nlk = nlk_sk(sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001487 int undo;
1488
1489 if (!nlk->netlink_unbind)
1490 return;
1491
1492 for (undo = 0; undo < group; undo++)
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09001493 if (test_bit(undo, &groups))
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001494 nlk->netlink_unbind(sock_net(sk), undo + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001495}
1496
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001497static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1498 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499{
1500 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001501 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 struct netlink_sock *nlk = nlk_sk(sk);
1503 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1504 int err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001505	unsigned long groups = nladdr->nl_groups;
Herbert Xud4862362015-09-22 11:38:56 +08001506 bool bound;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001507
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001508 if (addr_len < sizeof(struct sockaddr_nl))
1509 return -EINVAL;
1510
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 if (nladdr->nl_family != AF_NETLINK)
1512 return -EINVAL;
1513
1514	/* Only superuser is allowed to listen to multicasts */
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001515 if (groups) {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001516 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001517 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001518 err = netlink_realloc_groups(sk);
1519 if (err)
1520 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001521 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Herbert Xud4862362015-09-22 11:38:56 +08001523 bound = nlk->bound;
1524 if (bound) {
1525 /* Ensure nlk->portid is up-to-date. */
1526 smp_rmb();
1527
Eric W. Biederman15e47302012-09-07 20:12:54 +00001528 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 return -EINVAL;
Herbert Xud4862362015-09-22 11:38:56 +08001530 }
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001531
1532 if (nlk->netlink_bind && groups) {
1533 int group;
1534
1535 for (group = 0; group < nlk->ngroups; group++) {
1536 if (!test_bit(group, &groups))
1537 continue;
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001538 err = nlk->netlink_bind(net, group + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001539 if (!err)
1540 continue;
Johannes Berg023e2cf2014-12-23 21:00:06 +01001541 netlink_undo_bind(group, groups, sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001542 return err;
1543 }
1544 }
1545
Herbert Xud4862362015-09-22 11:38:56 +08001546 /* No need for barriers here as we return to user-space without
1547 * using any of the bound attributes.
1548 */
1549 if (!bound) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 err = nladdr->nl_pid ?
Herbert Xu8ea65f42015-01-26 14:02:56 +11001551 netlink_insert(sk, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 netlink_autobind(sock);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001553 if (err) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01001554 netlink_undo_bind(nlk->ngroups, groups, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 return err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001556 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 }
1558
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001559 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 return 0;
1561
1562 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001563 netlink_update_subscriptions(sk, nlk->subscriptions +
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001564 hweight32(groups) -
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001565 hweight32(nlk->groups[0]));
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001566 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001567 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 netlink_table_ungrab();
1569
1570 return 0;
1571}
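/*
 * Illustrative user-space sketch (not part of this file): nl_groups at
 * bind time is the legacy 32-bit mask handled above, so only groups
 * 1..32 can be joined this way; higher groups need
 * setsockopt(NETLINK_ADD_MEMBERSHIP).
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int bind_rtnl_groups(int fd)
{
	struct sockaddr_nl sa;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = RTMGRP_LINK | RTMGRP_IPV4_IFADDR;	/* bit mask */
	return bind(fd, (struct sockaddr *)&sa, sizeof(sa));
}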
1572
1573static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1574 int alen, int flags)
1575{
1576 int err = 0;
1577 struct sock *sk = sock->sk;
1578 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001579 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
Changli Gao6503d962010-03-31 22:58:26 +00001581 if (alen < sizeof(addr->sa_family))
1582 return -EINVAL;
1583
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 if (addr->sa_family == AF_UNSPEC) {
1585 sk->sk_state = NETLINK_UNCONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001586 nlk->dst_portid = 0;
Patrick McHardyd629b832005-08-14 19:27:50 -07001587 nlk->dst_group = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 return 0;
1589 }
1590 if (addr->sa_family != AF_NETLINK)
1591 return -EINVAL;
1592
Mike Pecovnik46833a82014-02-24 21:11:16 +01001593 if ((nladdr->nl_groups || nladdr->nl_pid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001594 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 return -EPERM;
1596
Herbert Xud4862362015-09-22 11:38:56 +08001597 /* No need for barriers here as we return to user-space without
1598 * using any of the bound attributes.
1599 */
1600 if (!nlk->bound)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 err = netlink_autobind(sock);
1602
1603 if (err == 0) {
1604 sk->sk_state = NETLINK_CONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001605 nlk->dst_portid = nladdr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001606 nlk->dst_group = ffs(nladdr->nl_groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 }
1608
1609 return err;
1610}
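/*
 * Illustrative user-space sketch (not part of this file): connect()
 * only records a default destination for subsequent send() calls; an
 * AF_UNSPEC address resets it, mirroring the code above.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int set_default_peer(int fd, unsigned int peer_portid)
{
	struct sockaddr_nl sa;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_pid = peer_portid;	/* 0 addresses the kernel */
	return connect(fd, (struct sockaddr *)&sa, sizeof(sa));
}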
1611
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001612static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1613 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614{
1615 struct sock *sk = sock->sk;
1616 struct netlink_sock *nlk = nlk_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001617 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001618
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 nladdr->nl_family = AF_NETLINK;
1620 nladdr->nl_pad = 0;
1621 *addr_len = sizeof(*nladdr);
1622
1623 if (peer) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001624 nladdr->nl_pid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001625 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001627 nladdr->nl_pid = nlk->portid;
Patrick McHardy513c2502005-09-06 15:43:59 -07001628 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 }
1630 return 0;
1631}
1632
Eric W. Biederman15e47302012-09-07 20:12:54 +00001633static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 struct sock *sock;
1636 struct netlink_sock *nlk;
1637
Eric W. Biederman15e47302012-09-07 20:12:54 +00001638 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 if (!sock)
1640 return ERR_PTR(-ECONNREFUSED);
1641
1642	/* Don't bother queuing the skb if the kernel socket has no input function */
1643 nlk = nlk_sk(sock);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001644 if (sock->sk_state == NETLINK_CONNECTED &&
Eric W. Biederman15e47302012-09-07 20:12:54 +00001645 nlk->dst_portid != nlk_sk(ssk)->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 sock_put(sock);
1647 return ERR_PTR(-ECONNREFUSED);
1648 }
1649 return sock;
1650}
1651
1652struct sock *netlink_getsockbyfilp(struct file *filp)
1653{
Al Viro496ad9a2013-01-23 17:07:38 -05001654 struct inode *inode = file_inode(filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 struct sock *sock;
1656
1657 if (!S_ISSOCK(inode->i_mode))
1658 return ERR_PTR(-ENOTSOCK);
1659
1660 sock = SOCKET_I(inode)->sk;
1661 if (sock->sk_family != AF_NETLINK)
1662 return ERR_PTR(-EINVAL);
1663
1664 sock_hold(sock);
1665 return sock;
1666}
1667
Pablo Neira3a365152013-06-28 03:04:23 +02001668static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1669 int broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001670{
1671 struct sk_buff *skb;
1672 void *data;
1673
Pablo Neira3a365152013-06-28 03:04:23 +02001674 if (size <= NLMSG_GOODSIZE || broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001675 return alloc_skb(size, GFP_KERNEL);
1676
Pablo Neira3a365152013-06-28 03:04:23 +02001677 size = SKB_DATA_ALIGN(size) +
1678 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001679
1680 data = vmalloc(size);
1681 if (data == NULL)
Pablo Neira3a365152013-06-28 03:04:23 +02001682 return NULL;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001683
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001684 skb = __build_skb(data, size);
Pablo Neira3a365152013-06-28 03:04:23 +02001685 if (skb == NULL)
1686 vfree(data);
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001687 else
Pablo Neira3a365152013-06-28 03:04:23 +02001688 skb->destructor = netlink_skb_destructor;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001689
1690 return skb;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001691}
1692
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693/*
1694 * Attach a skb to a netlink socket.
1695 * The caller must hold a reference to the destination socket. On error, the
1696 * reference is dropped. The skb is not sent to the destination; all
1697 * error checks are performed and memory in the queue is reserved.
1698 * Return values:
1699 * < 0: error. skb freed, reference to sock dropped.
1700 * 0: continue
1701 * 1: repeat lookup - reference dropped while waiting for socket memory.
1702 */
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001703int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001704 long *timeo, struct sock *ssk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705{
1706 struct netlink_sock *nlk;
1707
1708 nlk = nlk_sk(sk);
1709
Patrick McHardy5fd96122013-04-17 06:47:03 +00001710 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1711 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1712 !netlink_skb_is_mmaped(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 DECLARE_WAITQUEUE(wait, current);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001714 if (!*timeo) {
Denis V. Lunevaed81562007-10-10 21:14:32 -07001715 if (!ssk || netlink_is_kernel(ssk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 netlink_overrun(sk);
1717 sock_put(sk);
1718 kfree_skb(skb);
1719 return -EAGAIN;
1720 }
1721
1722 __set_current_state(TASK_INTERRUPTIBLE);
1723 add_wait_queue(&nlk->wait, &wait);
1724
1725 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Patrick McHardycd967e02013-04-17 06:46:56 +00001726 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 !sock_flag(sk, SOCK_DEAD))
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001728 *timeo = schedule_timeout(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
1730 __set_current_state(TASK_RUNNING);
1731 remove_wait_queue(&nlk->wait, &wait);
1732 sock_put(sk);
1733
1734 if (signal_pending(current)) {
1735 kfree_skb(skb);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001736 return sock_intr_errno(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 }
1738 return 1;
1739 }
Patrick McHardycf0a0182013-04-17 06:47:00 +00001740 netlink_skb_set_owner_r(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 return 0;
1742}
1743
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001744static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 int len = skb->len;
1747
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001748 netlink_deliver_tap(skb);
1749
Patrick McHardyf9c22882013-04-17 06:47:04 +00001750#ifdef CONFIG_NETLINK_MMAP
1751 if (netlink_skb_is_mmaped(skb))
1752 netlink_queue_mmaped_skb(sk, skb);
1753 else if (netlink_rx_is_mmaped(sk))
1754 netlink_ring_set_copied(sk, skb);
1755 else
1756#endif /* CONFIG_NETLINK_MMAP */
1757 skb_queue_tail(&sk->sk_receive_queue, skb);
David S. Miller676d2362014-04-11 16:15:36 -04001758 sk->sk_data_ready(sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001759 return len;
1760}
1761
1762int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1763{
1764 int len = __netlink_sendskb(sk, skb);
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 sock_put(sk);
1767 return len;
1768}
1769
1770void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1771{
1772 kfree_skb(skb);
1773 sock_put(sk);
1774}
1775
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001776static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777{
1778 int delta;
1779
Patrick McHardy1298ca42013-04-17 06:46:59 +00001780 WARN_ON(skb->sk != NULL);
Patrick McHardy5fd96122013-04-17 06:47:03 +00001781 if (netlink_skb_is_mmaped(skb))
1782 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001784 delta = skb->end - skb->tail;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001785 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 return skb;
1787
1788 if (skb_shared(skb)) {
1789 struct sk_buff *nskb = skb_clone(skb, allocation);
1790 if (!nskb)
1791 return skb;
Eric Dumazet8460c002012-04-19 02:24:28 +00001792 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 skb = nskb;
1794 }
1795
1796 if (!pskb_expand_head(skb, 0, -delta, allocation))
1797 skb->truesize -= delta;
1798
1799 return skb;
1800}
1801
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001802static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1803 struct sock *ssk)
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001804{
1805 int ret;
1806 struct netlink_sock *nlk = nlk_sk(sk);
1807
1808 ret = -ECONNREFUSED;
1809 if (nlk->netlink_rcv != NULL) {
1810 ret = skb->len;
Patrick McHardycf0a0182013-04-17 06:47:00 +00001811 netlink_skb_set_owner_r(skb, sk);
Patrick McHardye32123e2013-04-17 06:46:57 +00001812 NETLINK_CB(skb).sk = ssk;
Daniel Borkmann73bfd372013-12-23 14:35:55 +01001813 netlink_deliver_tap_kernel(sk, ssk, skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001814 nlk->netlink_rcv(skb);
Eric Dumazetbfb253c2012-04-22 21:30:29 +00001815 consume_skb(skb);
1816 } else {
1817 kfree_skb(skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001818 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001819 sock_put(sk);
1820 return ret;
1821}
1822
1823int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001824 u32 portid, int nonblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825{
1826 struct sock *sk;
1827 int err;
1828 long timeo;
1829
1830 skb = netlink_trim(skb, gfp_any());
1831
1832 timeo = sock_sndtimeo(ssk, nonblock);
1833retry:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001834 sk = netlink_getsockbyportid(ssk, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 if (IS_ERR(sk)) {
1836 kfree_skb(skb);
1837 return PTR_ERR(sk);
1838 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001839 if (netlink_is_kernel(sk))
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001840 return netlink_unicast_kernel(sk, skb, ssk);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001841
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001842 if (sk_filter(sk, skb)) {
Wang Chen84874602008-07-01 19:55:09 -07001843 err = skb->len;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001844 kfree_skb(skb);
1845 sock_put(sk);
1846 return err;
1847 }
1848
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001849 err = netlink_attachskb(sk, skb, &timeo, ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 if (err == 1)
1851 goto retry;
1852 if (err)
1853 return err;
1854
Denis V. Lunev7ee015e2007-10-10 21:14:03 -07001855 return netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001857EXPORT_SYMBOL(netlink_unicast);
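/*
 * Illustrative kernel-side sketch (not part of this file, names and the
 * NLMSG_DONE reply are hypothetical): a subsystem answering a request
 * via netlink_unicast(). 'my_sock' would be the socket returned by
 * netlink_kernel_create().
 */
static int example_reply(struct sock *my_sock, struct sk_buff *req)
{
	u32 portid = NETLINK_CB(req).portid;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	nlh = nlmsg_put(skb, portid, nlmsg_hdr(req)->nlmsg_seq,
			NLMSG_DONE, 0, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	/* netlink_unicast() consumes the skb on both success and error */
	return netlink_unicast(my_sock, skb, portid, MSG_DONTWAIT);
}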
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858
Patrick McHardyf9c22882013-04-17 06:47:04 +00001859struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1860 u32 dst_portid, gfp_t gfp_mask)
1861{
1862#ifdef CONFIG_NETLINK_MMAP
1863 struct sock *sk = NULL;
1864 struct sk_buff *skb;
1865 struct netlink_ring *ring;
1866 struct nl_mmap_hdr *hdr;
1867 unsigned int maxlen;
1868
1869 sk = netlink_getsockbyportid(ssk, dst_portid);
1870 if (IS_ERR(sk))
1871 goto out;
1872
1873 ring = &nlk_sk(sk)->rx_ring;
1874 /* fast-path without atomic ops for common case: non-mmaped receiver */
1875 if (ring->pg_vec == NULL)
1876 goto out_put;
1877
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001878 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1879 goto out_put;
1880
Patrick McHardyf9c22882013-04-17 06:47:04 +00001881 skb = alloc_skb_head(gfp_mask);
1882 if (skb == NULL)
1883 goto err1;
1884
1885 spin_lock_bh(&sk->sk_receive_queue.lock);
1886 /* check again under lock */
1887 if (ring->pg_vec == NULL)
1888 goto out_free;
1889
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001890	/* check frame size again under lock */
Patrick McHardyf9c22882013-04-17 06:47:04 +00001891 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1892 if (maxlen < size)
1893 goto out_free;
1894
1895 netlink_forward_ring(ring);
1896 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1897 if (hdr == NULL)
1898 goto err2;
1899 netlink_ring_setup_skb(skb, sk, ring, hdr);
1900 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1901 atomic_inc(&ring->pending);
1902 netlink_increment_head(ring);
1903
1904 spin_unlock_bh(&sk->sk_receive_queue.lock);
1905 return skb;
1906
1907err2:
1908 kfree_skb(skb);
1909 spin_unlock_bh(&sk->sk_receive_queue.lock);
Patrick McHardycd1df522013-04-17 06:47:05 +00001910 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +00001911err1:
1912 sock_put(sk);
1913 return NULL;
1914
1915out_free:
1916 kfree_skb(skb);
1917 spin_unlock_bh(&sk->sk_receive_queue.lock);
1918out_put:
1919 sock_put(sk);
1920out:
1921#endif
1922 return alloc_skb(size, gfp_mask);
1923}
1924EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1925
Patrick McHardy4277a082006-03-20 18:52:01 -08001926int netlink_has_listeners(struct sock *sk, unsigned int group)
1927{
1928 int res = 0;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001929 struct listeners *listeners;
Patrick McHardy4277a082006-03-20 18:52:01 -08001930
Denis V. Lunevaed81562007-10-10 21:14:32 -07001931 BUG_ON(!netlink_is_kernel(sk));
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001932
1933 rcu_read_lock();
1934 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1935
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001936 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001937 res = test_bit(group - 1, listeners->masks);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001938
1939 rcu_read_unlock();
1940
Patrick McHardy4277a082006-03-20 18:52:01 -08001941 return res;
1942}
1943EXPORT_SYMBOL_GPL(netlink_has_listeners);
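/*
 * Illustrative kernel-side sketch (not part of this file): the usual
 * pattern is to ask netlink_has_listeners() before building a
 * potentially large notification; group 1 is hypothetical.
 */
static void example_notify(struct sock *my_sock)
{
	if (!netlink_has_listeners(my_sock, 1))
		return;	/* nobody subscribed, skip the allocation */

	/* ... build the event skb and netlink_broadcast() it ... */
}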
1944
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001945static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946{
1947 struct netlink_sock *nlk = nlk_sk(sk);
1948
1949 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
Patrick McHardycd967e02013-04-17 06:46:56 +00001950 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
Patrick McHardycf0a0182013-04-17 06:47:00 +00001951 netlink_skb_set_owner_r(skb, sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001952 __netlink_sendskb(sk, skb);
stephen hemminger2c6458002011-12-22 08:52:03 +00001953 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 }
1955 return -1;
1956}
1957
1958struct netlink_broadcast_data {
1959 struct sock *exclude_sk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001960 struct net *net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001961 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 u32 group;
1963 int failure;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001964 int delivery_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 int congested;
1966 int delivered;
Al Viro7d877f32005-10-21 03:20:43 -04001967 gfp_t allocation;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 struct sk_buff *skb, *skb2;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001969 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1970 void *tx_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971};
1972
Rami Rosen46c95212014-07-01 21:17:35 +03001973static void do_one_broadcast(struct sock *sk,
1974 struct netlink_broadcast_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975{
1976 struct netlink_sock *nlk = nlk_sk(sk);
1977 int val;
1978
1979 if (p->exclude_sk == sk)
Rami Rosen46c95212014-07-01 21:17:35 +03001980 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Eric W. Biederman15e47302012-09-07 20:12:54 +00001982 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001983 !test_bit(p->group - 1, nlk->groups))
Rami Rosen46c95212014-07-01 21:17:35 +03001984 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001986 if (!net_eq(sock_net(sk), p->net))
Rami Rosen46c95212014-07-01 21:17:35 +03001987 return;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001988
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 if (p->failure) {
1990 netlink_overrun(sk);
Rami Rosen46c95212014-07-01 21:17:35 +03001991 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 }
1993
1994 sock_hold(sk);
1995 if (p->skb2 == NULL) {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001996 if (skb_shared(p->skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 p->skb2 = skb_clone(p->skb, p->allocation);
1998 } else {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001999 p->skb2 = skb_get(p->skb);
2000 /*
2001 * skb ownership may have been set when
2002 * delivered to a previous socket.
2003 */
2004 skb_orphan(p->skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 }
2006 }
2007 if (p->skb2 == NULL) {
2008 netlink_overrun(sk);
2009 /* Clone failed. Notify ALL listeners. */
2010 p->failure = 1;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002011 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
2012 p->delivery_failure = 1;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002013 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
2014 kfree_skb(p->skb2);
2015 p->skb2 = NULL;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002016 } else if (sk_filter(sk, p->skb2)) {
2017 kfree_skb(p->skb2);
2018 p->skb2 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
2020 netlink_overrun(sk);
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002021 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
2022 p->delivery_failure = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 } else {
2024 p->congested |= val;
2025 p->delivered = 1;
2026 p->skb2 = NULL;
2027 }
2028 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029}
2030
Eric W. Biederman15e47302012-09-07 20:12:54 +00002031int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002032 u32 group, gfp_t allocation,
2033 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2034 void *filter_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002036 struct net *net = sock_net(ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 struct netlink_broadcast_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 struct sock *sk;
2039
2040 skb = netlink_trim(skb, allocation);
2041
2042 info.exclude_sk = ssk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002043 info.net = net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002044 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 info.group = group;
2046 info.failure = 0;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002047 info.delivery_failure = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 info.congested = 0;
2049 info.delivered = 0;
2050 info.allocation = allocation;
2051 info.skb = skb;
2052 info.skb2 = NULL;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002053 info.tx_filter = filter;
2054 info.tx_data = filter_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055
2056	/* While we sleep in clone, do not allow the socket list to change */
2057
2058 netlink_lock_table();
2059
Sasha Levinb67bfe02013-02-27 17:06:00 -08002060 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 do_one_broadcast(sk, &info);
2062
Neil Horman70d4bf62010-07-20 06:45:56 +00002063 consume_skb(skb);
Tommy S. Christensenaa1c6a6f2005-05-19 13:07:32 -07002064
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 netlink_unlock_table();
2066
Neil Horman70d4bf62010-07-20 06:45:56 +00002067 if (info.delivery_failure) {
2068 kfree_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002069 return -ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002070 }
2071 consume_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002072
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 if (info.delivered) {
2074 if (info.congested && (allocation & __GFP_WAIT))
2075 yield();
2076 return 0;
2077 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 return -ESRCH;
2079}
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002080EXPORT_SYMBOL(netlink_broadcast_filtered);
2081
Eric W. Biederman15e47302012-09-07 20:12:54 +00002082int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002083 u32 group, gfp_t allocation)
2084{
Eric W. Biederman15e47302012-09-07 20:12:54 +00002085 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002086 NULL, NULL);
2087}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002088EXPORT_SYMBOL(netlink_broadcast);
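/*
 * Illustrative kernel-side sketch (not part of this file): broadcasting
 * an event to multicast group 1 while skipping no requester (portid 0).
 * -ESRCH just means nobody was listening and is usually not an error.
 */
static int example_broadcast(struct sock *my_sock, struct sk_buff *skb)
{
	int err = netlink_broadcast(my_sock, skb, 0, 1, GFP_KERNEL);

	return err == -ESRCH ? 0 : err;
}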
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089
2090struct netlink_set_err_data {
2091 struct sock *exclude_sk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002092 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 u32 group;
2094 int code;
2095};
2096
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00002097static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098{
2099 struct netlink_sock *nlk = nlk_sk(sk);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002100 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
2102 if (sk == p->exclude_sk)
2103 goto out;
2104
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002105 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002106 goto out;
2107
Eric W. Biederman15e47302012-09-07 20:12:54 +00002108 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07002109 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 goto out;
2111
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002112 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2113 ret = 1;
2114 goto out;
2115 }
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 sk->sk_err = p->code;
2118 sk->sk_error_report(sk);
2119out:
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002120 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121}
2122
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002123/**
2124 * netlink_set_err - report error to broadcast listeners
2125 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
Eric W. Biederman15e47302012-09-07 20:12:54 +00002126 * @portid: the PORTID of a process that we want to skip (if any)
Johannes Berg840e93f22013-11-19 10:35:40 +01002127 * @group: the broadcast group that will notice the error
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002128 * @code: error code, must be negative (as usual in kernelspace)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002129 *
2130 * This function returns the number of broadcast listeners that have set the
2131 * NETLINK_RECV_NO_ENOBUFS socket option.
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002132 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002133int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134{
2135 struct netlink_set_err_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 struct sock *sk;
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002137 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
2139 info.exclude_sk = ssk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002140 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 info.group = group;
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002142 /* sk->sk_err wants a positive error value */
2143 info.code = -code;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
2145 read_lock(&nl_table_lock);
2146
Sasha Levinb67bfe02013-02-27 17:06:00 -08002147 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002148 ret += do_one_set_err(sk, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
2150 read_unlock(&nl_table_lock);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002151 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152}
Pablo Neira Ayusodd5b6ce2009-03-23 13:21:06 +01002153EXPORT_SYMBOL(netlink_set_err);
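/*
 * Illustrative kernel-side sketch (not part of this file): after a
 * delivery failure, report the loss to group-1 listeners; per the
 * kernel-doc above, the code passed in must be negative.
 */
static void example_report_loss(struct sock *my_sock)
{
	netlink_set_err(my_sock, 0, 1, -ENOBUFS);
}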
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
Johannes Berg84659eb2007-07-18 15:47:05 -07002155/* must be called with netlink table grabbed */
2156static void netlink_update_socket_mc(struct netlink_sock *nlk,
2157 unsigned int group,
2158 int is_new)
2159{
2160 int old, new = !!is_new, subscriptions;
2161
2162 old = test_bit(group - 1, nlk->groups);
2163 subscriptions = nlk->subscriptions - old + new;
2164 if (new)
2165 __set_bit(group - 1, nlk->groups);
2166 else
2167 __clear_bit(group - 1, nlk->groups);
2168 netlink_update_subscriptions(&nlk->sk, subscriptions);
2169 netlink_update_listeners(&nlk->sk);
2170}
2171
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002172static int netlink_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002173 char __user *optval, unsigned int optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002174{
2175 struct sock *sk = sock->sk;
2176 struct netlink_sock *nlk = nlk_sk(sk);
Johannes Bergeb496532007-07-18 02:07:51 -07002177 unsigned int val = 0;
2178 int err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002179
2180 if (level != SOL_NETLINK)
2181 return -ENOPROTOOPT;
2182
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002183 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2184 optlen >= sizeof(int) &&
Johannes Bergeb496532007-07-18 02:07:51 -07002185 get_user(val, (unsigned int __user *)optval))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002186 return -EFAULT;
2187
2188 switch (optname) {
2189 case NETLINK_PKTINFO:
2190 if (val)
2191 nlk->flags |= NETLINK_RECV_PKTINFO;
2192 else
2193 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2194 err = 0;
2195 break;
2196 case NETLINK_ADD_MEMBERSHIP:
2197 case NETLINK_DROP_MEMBERSHIP: {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002198 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002199 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002200 err = netlink_realloc_groups(sk);
2201 if (err)
2202 return err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002203 if (!val || val - 1 >= nlk->ngroups)
2204 return -EINVAL;
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002205 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01002206 err = nlk->netlink_bind(sock_net(sk), val);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04002207 if (err)
2208 return err;
2209 }
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002210 netlink_table_grab();
Johannes Berg84659eb2007-07-18 15:47:05 -07002211 netlink_update_socket_mc(nlk, val,
2212 optname == NETLINK_ADD_MEMBERSHIP);
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002213 netlink_table_ungrab();
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002214 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
Johannes Berg023e2cf2014-12-23 21:00:06 +01002215 nlk->netlink_unbind(sock_net(sk), val);
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00002216
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002217 err = 0;
2218 break;
2219 }
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002220 case NETLINK_BROADCAST_ERROR:
2221 if (val)
2222 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2223 else
2224 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2225 err = 0;
2226 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002227 case NETLINK_NO_ENOBUFS:
2228 if (val) {
2229 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
Patrick McHardycd967e02013-04-17 06:46:56 +00002230 clear_bit(NETLINK_CONGESTED, &nlk->state);
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002231 wake_up_interruptible(&nlk->wait);
Eric Dumazet658cb352012-04-22 21:30:21 +00002232 } else {
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002233 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002234 }
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002235 err = 0;
2236 break;
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002237#ifdef CONFIG_NETLINK_MMAP
2238 case NETLINK_RX_RING:
2239 case NETLINK_TX_RING: {
2240 struct nl_mmap_req req;
2241
2242	/* Rings might consume more memory than the queue limits allow,
2243	 * so require CAP_NET_ADMIN.
2244 */
2245 if (!capable(CAP_NET_ADMIN))
2246 return -EPERM;
2247 if (optlen < sizeof(req))
2248 return -EINVAL;
2249 if (copy_from_user(&req, optval, sizeof(req)))
2250 return -EFAULT;
Florian Westphalb265c302015-07-21 16:33:50 +02002251 err = netlink_set_ring(sk, &req,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002252 optname == NETLINK_TX_RING);
2253 break;
2254 }
2255#endif /* CONFIG_NETLINK_MMAP */
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002256 default:
2257 err = -ENOPROTOOPT;
2258 }
2259 return err;
2260}
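/*
 * Illustrative user-space sketch (not part of this file): joining a
 * multicast group by number through the NETLINK_ADD_MEMBERSHIP path
 * above, which also works for groups beyond the 32-bit bind() mask.
 */
#include <sys/socket.h>
#include <linux/netlink.h>

static int join_group(int fd, unsigned int group)
{
	return setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			  &group, sizeof(group));
}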
2261
2262static int netlink_getsockopt(struct socket *sock, int level, int optname,
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002263 char __user *optval, int __user *optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002264{
2265 struct sock *sk = sock->sk;
2266 struct netlink_sock *nlk = nlk_sk(sk);
2267 int len, val, err;
2268
2269 if (level != SOL_NETLINK)
2270 return -ENOPROTOOPT;
2271
2272 if (get_user(len, optlen))
2273 return -EFAULT;
2274 if (len < 0)
2275 return -EINVAL;
2276
2277 switch (optname) {
2278 case NETLINK_PKTINFO:
2279 if (len < sizeof(int))
2280 return -EINVAL;
2281 len = sizeof(int);
2282 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
Heiko Carstensa27b58f2006-10-30 15:06:12 -08002283 if (put_user(len, optlen) ||
2284 put_user(val, optval))
2285 return -EFAULT;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002286 err = 0;
2287 break;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002288 case NETLINK_BROADCAST_ERROR:
2289 if (len < sizeof(int))
2290 return -EINVAL;
2291 len = sizeof(int);
2292 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2293 if (put_user(len, optlen) ||
2294 put_user(val, optval))
2295 return -EFAULT;
2296 err = 0;
2297 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002298 case NETLINK_NO_ENOBUFS:
2299 if (len < sizeof(int))
2300 return -EINVAL;
2301 len = sizeof(int);
2302 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2303 if (put_user(len, optlen) ||
2304 put_user(val, optval))
2305 return -EFAULT;
2306 err = 0;
2307 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002308 default:
2309 err = -ENOPROTOOPT;
2310 }
2311 return err;
2312}
2313
2314static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2315{
2316 struct nl_pktinfo info;
2317
2318 info.group = NETLINK_CB(skb).dst_group;
2319 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2320}
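/*
 * Illustrative user-space sketch (not part of this file): with
 * NETLINK_PKTINFO enabled, the destination group of each message is
 * delivered in the control message emitted above.
 */
#include <sys/socket.h>
#include <linux/netlink.h>

static unsigned int msg_dst_group(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_NETLINK &&
		    cmsg->cmsg_type == NETLINK_PKTINFO) {
			struct nl_pktinfo *info = (void *)CMSG_DATA(cmsg);

			return info->group;
		}
	}
	return 0;	/* unicast, or no pktinfo present */
}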
2321
Ying Xue1b784142015-03-02 15:37:48 +08002322static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 struct sock *sk = sock->sk;
2325 struct netlink_sock *nlk = nlk_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002326 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002327 u32 dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002328 u32 dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 struct sk_buff *skb;
2330 int err;
2331 struct scm_cookie scm;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002332 u32 netlink_skb_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333
2334 if (msg->msg_flags&MSG_OOB)
2335 return -EOPNOTSUPP;
2336
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002337 err = scm_send(sock, msg, &scm, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 if (err < 0)
2339 return err;
2340
2341 if (msg->msg_namelen) {
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002342 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 if (addr->nl_family != AF_NETLINK)
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002344 goto out;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002345 dst_portid = addr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002346 dst_group = ffs(addr->nl_groups);
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002347 err = -EPERM;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002348 if ((dst_group || dst_portid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002349 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002350 goto out;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002351 netlink_skb_flags |= NETLINK_SKB_DST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002353 dst_portid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002354 dst_group = nlk->dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 }
2356
Herbert Xud4862362015-09-22 11:38:56 +08002357 if (!nlk->bound) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 err = netlink_autobind(sock);
2359 if (err)
2360 goto out;
Herbert Xud4862362015-09-22 11:38:56 +08002361 } else {
2362 /* Ensure nlk is hashed and visible. */
2363 smp_rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 }
2365
Al Viroa8866ff2014-12-12 23:02:36 -05002366 /* It's a really convoluted way for userland to ask for mmaped
2367 * sendmsg(), but that's what we've got...
2368 */
Patrick McHardy5fd96122013-04-17 06:47:03 +00002369 if (netlink_tx_is_mmaped(sk) &&
Al Viroa8866ff2014-12-12 23:02:36 -05002370 msg->msg_iter.type == ITER_IOVEC &&
2371 msg->msg_iter.nr_segs == 1 &&
Al Viroc0371da2014-11-24 10:42:55 -05002372 msg->msg_iter.iov->iov_base == NULL) {
Patrick McHardy5fd96122013-04-17 06:47:03 +00002373 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002374 &scm);
Patrick McHardy5fd96122013-04-17 06:47:03 +00002375 goto out;
2376 }
2377
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 err = -EMSGSIZE;
2379 if (len > sk->sk_sndbuf - 32)
2380 goto out;
2381 err = -ENOBUFS;
Pablo Neira3a365152013-06-28 03:04:23 +02002382 skb = netlink_alloc_large_skb(len, dst_group);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002383 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 goto out;
2385
Eric W. Biederman15e47302012-09-07 20:12:54 +00002386 NETLINK_CB(skb).portid = nlk->portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002387 NETLINK_CB(skb).dst_group = dst_group;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002388 NETLINK_CB(skb).creds = scm.creds;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002389 NETLINK_CB(skb).flags = netlink_skb_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 err = -EFAULT;
Al Viro6ce8e9c2014-04-06 21:25:44 -04002392 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 kfree_skb(skb);
2394 goto out;
2395 }
2396
2397 err = security_netlink_send(sk, skb);
2398 if (err) {
2399 kfree_skb(skb);
2400 goto out;
2401 }
2402
Patrick McHardyd629b832005-08-14 19:27:50 -07002403 if (dst_group) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002405 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00002407 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408
2409out:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002410 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411 return err;
2412}
2413
Ying Xue1b784142015-03-02 15:37:48 +08002414static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 int flags)
2416{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 struct scm_cookie scm;
2418 struct sock *sk = sock->sk;
2419 struct netlink_sock *nlk = nlk_sk(sk);
2420 int noblock = flags&MSG_DONTWAIT;
2421 size_t copied;
Johannes Berg68d6ac62010-08-15 21:20:44 +00002422 struct sk_buff *skb, *data_skb;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002423 int err, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424
2425 if (flags&MSG_OOB)
2426 return -EOPNOTSUPP;
2427
2428 copied = 0;
2429
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002430 skb = skb_recv_datagram(sk, flags, noblock, &err);
2431 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 goto out;
2433
Johannes Berg68d6ac62010-08-15 21:20:44 +00002434 data_skb = skb;
2435
Johannes Berg1dacc762009-07-01 11:26:02 +00002436#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2437 if (unlikely(skb_shinfo(skb)->frag_list)) {
Johannes Berg1dacc762009-07-01 11:26:02 +00002438 /*
Johannes Berg68d6ac62010-08-15 21:20:44 +00002439 * If this skb has a frag_list, then here that means that we
2440 * will have to use the frag_list skb's data for compat tasks
2441 * and the regular skb's data for normal (non-compat) tasks.
Johannes Berg1dacc762009-07-01 11:26:02 +00002442 *
Johannes Berg68d6ac62010-08-15 21:20:44 +00002443 * If we need to send the compat skb, assign it to the
2444 * 'data_skb' variable so that it will be used below for data
2445 * copying. We keep 'skb' for everything else, including
2446 * freeing both later.
Johannes Berg1dacc762009-07-01 11:26:02 +00002447 */
Johannes Berg68d6ac62010-08-15 21:20:44 +00002448 if (flags & MSG_CMSG_COMPAT)
2449 data_skb = skb_shinfo(skb)->frag_list;
Johannes Berg1dacc762009-07-01 11:26:02 +00002450 }
2451#endif
2452
Eric Dumazet9063e212014-03-07 12:02:33 -08002453 /* Record the max length of recvmsg() calls for future allocations */
2454 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2455 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2456 16384);
2457
Johannes Berg68d6ac62010-08-15 21:20:44 +00002458 copied = data_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 if (len < copied) {
2460 msg->msg_flags |= MSG_TRUNC;
2461 copied = len;
2462 }
2463
Johannes Berg68d6ac62010-08-15 21:20:44 +00002464 skb_reset_transport_header(data_skb);
David S. Miller51f3d022014-11-05 16:46:40 -05002465 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
2467 if (msg->msg_name) {
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002468 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 addr->nl_family = AF_NETLINK;
2470 addr->nl_pad = 0;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002471 addr->nl_pid = NETLINK_CB(skb).portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002472 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 msg->msg_namelen = sizeof(*addr);
2474 }
2475
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002476 if (nlk->flags & NETLINK_RECV_PKTINFO)
2477 netlink_cmsg_recv_pktinfo(msg, skb);
2478
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002479 memset(&scm, 0, sizeof(scm));
2480 scm.creds = *NETLINK_CREDS(skb);
Patrick McHardy188ccb52007-05-03 03:27:01 -07002481 if (flags & MSG_TRUNC)
Johannes Berg68d6ac62010-08-15 21:20:44 +00002482 copied = data_skb->len;
David S. Millerdaa37662010-08-15 23:21:50 -07002483
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 skb_free_datagram(sk, skb);
2485
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002486 if (nlk->cb_running &&
2487 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
Andrey Vaginb44d2112011-02-21 02:40:47 +00002488 ret = netlink_dump(sk);
2489 if (ret) {
Ben Pfaffac30ef82014-07-09 10:31:22 -07002490 sk->sk_err = -ret;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002491 sk->sk_error_report(sk);
2492 }
2493 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002495 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496out:
2497 netlink_rcv_wake(sk);
2498 return err ? : copied;
2499}
2500
David S. Miller676d2362014-04-11 16:15:36 -04002501static void netlink_data_ready(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502{
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002503 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504}
2505
2506/*
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002507 * We export these functions to other modules. They provide a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 * complete set of kernel non-blocking support for message
2509 * queueing.
2510 */
2511
2512struct sock *
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002513__netlink_kernel_create(struct net *net, int unit, struct module *module,
2514 struct netlink_kernel_cfg *cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515{
2516 struct socket *sock;
2517 struct sock *sk;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002518 struct netlink_sock *nlk;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002519 struct listeners *listeners = NULL;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002520 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2521 unsigned int groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522
Akinobu Mitafab2caf2006-08-29 02:15:24 -07002523 BUG_ON(!nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002525 if (unit < 0 || unit >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 return NULL;
2527
2528 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2529 return NULL;
2530
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002531 /*
2532 * We have to just have a reference on the net from sk, but don't
2533 * get_net it. Besides, we cannot get and then put the net here.
2534 * So we create one inside init_net and then move it to net.
2535 */
2536
2537 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2538 goto out_sock_release_nosk;
2539
2540 sk = sock->sk;
Denis V. Lunevedf02082008-02-29 11:18:32 -08002541 sk_change_net(sk, net);
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002542
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002543 if (!cfg || cfg->groups < 32)
Patrick McHardy4277a082006-03-20 18:52:01 -08002544 groups = 32;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002545 else
2546 groups = cfg->groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08002547
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002548 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
Patrick McHardy4277a082006-03-20 18:52:01 -08002549 if (!listeners)
2550 goto out_sock_release;
2551
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 sk->sk_data_ready = netlink_data_ready;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002553 if (cfg && cfg->input)
2554 nlk_sk(sk)->netlink_rcv = cfg->input;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555
Herbert Xu8ea65f42015-01-26 14:02:56 +11002556 if (netlink_insert(sk, 0))
Patrick McHardy77247bb2005-08-14 19:27:13 -07002557 goto out_sock_release;
2558
2559 nlk = nlk_sk(sk);
2560 nlk->flags |= NETLINK_KERNEL_SOCKET;
2561
2562 netlink_table_grab();
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002563 if (!nl_table[unit].registered) {
2564 nl_table[unit].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002565 rcu_assign_pointer(nl_table[unit].listeners, listeners);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002566 nl_table[unit].cb_mutex = cb_mutex;
2567 nl_table[unit].module = module;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002568 if (cfg) {
2569 nl_table[unit].bind = cfg->bind;
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09002570 nl_table[unit].unbind = cfg->unbind;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002571 nl_table[unit].flags = cfg->flags;
Gao fengda12c902013-06-06 14:49:11 +08002572 if (cfg->compare)
2573 nl_table[unit].compare = cfg->compare;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002574 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002575 nl_table[unit].registered = 1;
Jesper Juhlf937f1f462007-10-15 01:39:12 -07002576 } else {
2577 kfree(listeners);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08002578 nl_table[unit].registered++;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002579 }
Patrick McHardy77247bb2005-08-14 19:27:13 -07002580 netlink_table_ungrab();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002581 return sk;
2582
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002583out_sock_release:
Patrick McHardy4277a082006-03-20 18:52:01 -08002584 kfree(listeners);
Denis V. Lunev9dfbec12008-02-29 11:17:56 -08002585 netlink_kernel_release(sk);
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002586 return NULL;
2587
2588out_sock_release_nosk:
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002589 sock_release(sock);
Patrick McHardy77247bb2005-08-14 19:27:13 -07002590 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591}
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002592EXPORT_SYMBOL(__netlink_kernel_create);
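/*
 * Illustrative kernel-side sketch (not part of this file): subsystems
 * normally go through the netlink_kernel_create() wrapper with a
 * netlink_kernel_cfg; the protocol and callback here are placeholders.
 */
static void example_input(struct sk_buff *skb)
{
	/* handle one skb of requests from user space */
}

static struct sock *example_create(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= example_input,
	};

	return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
}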
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002593
2594void
2595netlink_kernel_release(struct sock *sk)
2596{
Denis V. Lunevedf02082008-02-29 11:18:32 -08002597 sk_release_kernel(sk);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002598}
2599EXPORT_SYMBOL(netlink_kernel_release);
2600
Johannes Bergd136f1b2009-09-12 03:03:15 +00002601int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002602{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002603 struct listeners *new, *old;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002604 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002605
2606 if (groups < 32)
2607 groups = 32;
2608
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002609 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002610 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2611 if (!new)
Johannes Bergd136f1b2009-09-12 03:03:15 +00002612 return -ENOMEM;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00002613 old = nl_deref_protected(tbl->listeners);
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002614 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2615 rcu_assign_pointer(tbl->listeners, new);
2616
Lai Jiangshan37b6b932011-03-15 18:01:42 +08002617 kfree_rcu(old, rcu);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002618 }
2619 tbl->groups = groups;
2620
Johannes Bergd136f1b2009-09-12 03:03:15 +00002621 return 0;
2622}
2623
2624/**
2625 * netlink_change_ngroups - change number of multicast groups
2626 *
2627 * This changes the number of multicast groups that are available
2628 * on a certain netlink family. Note that it is not possible to
2629 * change the number of groups to below 32. Also note that it does
2630 * not implicitly call netlink_clear_multicast_users() when the
2631 * number of groups is reduced.
2632 *
2633 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2634 * @groups: The new number of groups.
2635 */
2636int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2637{
2638 int err;
2639
2640 netlink_table_grab();
2641 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002642 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002643
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002644 return err;
2645}
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002646
Johannes Bergb8273572009-09-24 15:44:05 -07002647void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2648{
2649 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002650 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2651
Sasha Levinb67bfe02013-02-27 17:06:00 -08002652 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002653 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2654}
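/*
 * Sketch (illustrative fragment, compiled out): this variant assumes the
 * caller already holds the netlink table lock, so a typical call site
 * looks like the following.  test_nl_sock and group are hypothetical.
 */
#if 0
	netlink_table_grab();
	__netlink_clear_multicast_users(test_nl_sock, group);
	netlink_table_ungrab();
#endif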
2655
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002656struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002657__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002658{
2659 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002660 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002661
Wang Yufen23b45672014-02-17 16:53:32 +08002662 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002663 nlh->nlmsg_type = type;
2664 nlh->nlmsg_len = size;
2665 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002666 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002667 nlh->nlmsg_seq = seq;
2668 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002669 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002670 return nlh;
2671}
2672EXPORT_SYMBOL(__nlmsg_put);
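/*
 * Sketch (illustrative fragment, compiled out): message construction
 * normally goes through the nlmsg_put() wrapper in <net/netlink.h>,
 * which checks tailroom before calling __nlmsg_put().  TEST_MSG_TYPE
 * and struct test_msg are hypothetical.
 */
#if 0
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, TEST_MSG_TYPE,
			sizeof(struct test_msg), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	memcpy(nlmsg_data(nlh), &msg, sizeof(msg));
	nlmsg_end(skb, nlh);
#endif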
2673
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674/*
2675 * It looks a bit ugly.
2676 * It would be better to create a kernel thread.
2677 */
2678
2679static int netlink_dump(struct sock *sk)
2680{
2681 struct netlink_sock *nlk = nlk_sk(sk);
2682 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002683 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002685 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002686 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002688 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002689 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002690 err = -EINVAL;
2691 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 }
2693
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002694 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002695 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2696
Patrick McHardyf9c22882013-04-17 06:47:04 +00002697 if (!netlink_rx_is_mmaped(sk) &&
2698 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2699 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002700
2701 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2702 * required, but it makes sense to _attempt_ a 16K allocation
2703 * to reduce the number of system calls on dump operations, if the
2704 * user ever provided a big enough buffer.
2705 */
2706 if (alloc_size < nlk->max_recvmsg_len) {
2707 skb = netlink_alloc_skb(sk,
2708 nlk->max_recvmsg_len,
2709 nlk->portid,
2710 GFP_KERNEL |
2711 __GFP_NOWARN |
2712 __GFP_NORETRY);
2713 /* available room should be the exact amount to avoid MSG_TRUNC */
2714 if (skb)
2715 skb_reserve(skb, skb_tailroom(skb) -
2716 nlk->max_recvmsg_len);
2717 }
2718 if (!skb)
2719 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2720 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002721 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002722 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002723 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002724
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 len = cb->dump(skb, cb);
2726
2727 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002728 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002729
2730 if (sk_filter(sk, skb))
2731 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002732 else
2733 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 return 0;
2735 }
2736
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002737 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2738 if (!nlh)
2739 goto errout_skb;
2740
Johannes Berg670dc282011-06-20 13:40:46 +02002741 nl_dump_check_consistent(cb, nlh);
2742
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002743 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2744
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002745 if (sk_filter(sk, skb))
2746 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002747 else
2748 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Thomas Grafa8f74b22005-11-10 02:25:52 +01002750 if (cb->done)
2751 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002753 nlk->cb_running = false;
2754 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002755 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002756 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002758
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002759errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002760 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002761 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002762 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763}
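/*
 * Sketch (illustrative only, compiled out): the cb->dump() callback that
 * netlink_dump() drives.  It fills one skb per call, records its position
 * in cb->args[], and returns 0 once nothing is left so that the
 * NLMSG_DONE message above gets emitted.  TEST_NR_OBJS and
 * test_obj_fill() are hypothetical.
 */
#if 0
static int test_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long idx;

	for (idx = cb->args[0]; idx < TEST_NR_OBJS; idx++)
		if (test_obj_fill(skb, cb, idx) < 0)
			break;			/* skb full, resume here */
	cb->args[0] = idx;

	return skb->len;			/* > 0 means "call again" */
}
#endif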
2764
Gao feng6dc878a2012-10-04 20:15:48 +00002765int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2766 const struct nlmsghdr *nlh,
2767 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768{
2769 struct netlink_callback *cb;
2770 struct sock *sk;
2771 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002772 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773
Patrick McHardyf9c22882013-04-17 06:47:04 +00002774 /* Memory mapped dump requests need to be copied to avoid looping
2775 * on the pending state in netlink_mmap_sendmsg() while the cb holds
2776 * a reference to the skb.
2777 */
2778 if (netlink_skb_is_mmaped(skb)) {
2779 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002780 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002781 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002782 } else
2783 atomic_inc(&skb->users);
2784
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002785 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2786 if (sk == NULL) {
2787 ret = -ECONNREFUSED;
2788 goto error_free;
2789 }
2790
2791 nlk = nlk_sk(sk);
2792 mutex_lock(nlk->cb_mutex);
2793 /* A dump is in progress... */
2794 if (nlk->cb_running) {
2795 ret = -EBUSY;
2796 goto error_unlock;
2797 }
2798 /* take a reference on the module which cb->dump belongs to */
2799 if (!try_module_get(control->module)) {
2800 ret = -EPROTONOSUPPORT;
2801 goto error_unlock;
2802 }
2803
2804 cb = &nlk->cb;
2805 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002806 cb->dump = control->dump;
2807 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002809 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002810 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002811 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 cb->skb = skb;
2813
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002814 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002815
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002816 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
Andrey Vaginb44d2112011-02-21 02:40:47 +00002818 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002820
Andrey Vaginb44d2112011-02-21 02:40:47 +00002821 if (ret)
2822 return ret;
2823
Denis V. Lunev5c582982007-10-23 20:29:25 -07002824 /* We successfully started a dump; by returning -EINTR we
2825 * signal not to send an ACK even if one was requested.
2826 */
2827 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002828
2829error_unlock:
2830 sock_put(sk);
2831 mutex_unlock(nlk->cb_mutex);
2832error_free:
2833 kfree_skb(skb);
2834 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835}
Gao feng6dc878a2012-10-04 20:15:48 +00002836EXPORT_SYMBOL(__netlink_dump_start);
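/*
 * Sketch (illustrative fragment, compiled out): request handlers start
 * dumps through the netlink_dump_start() wrapper in <linux/netlink.h>,
 * which sets control->module = THIS_MODULE before calling in here.
 * test_dump() is the hypothetical callback sketched after netlink_dump()
 * above.
 */
#if 0
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = test_dump,
		};

		return netlink_dump_start(test_nl_sock, skb, nlh, &c);
	}
#endif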
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
2838void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2839{
2840 struct sk_buff *skb;
2841 struct nlmsghdr *rep;
2842 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002843 size_t payload = sizeof(*errmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844
Thomas Graf339bf982006-11-10 14:10:15 -08002845 /* error messages get the original request appended */
2846 if (err)
2847 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848
Patrick McHardyf9c22882013-04-17 06:47:04 +00002849 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2850 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851 if (!skb) {
2852 struct sock *sk;
2853
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002854 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002855 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002856 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 if (sk) {
2858 sk->sk_err = ENOBUFS;
2859 sk->sk_error_report(sk);
2860 sock_put(sk);
2861 }
2862 return;
2863 }
2864
Eric W. Biederman15e47302012-09-07 20:12:54 +00002865 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002866 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002867 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 errmsg->error = err;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002869 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002870 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002872EXPORT_SYMBOL(netlink_ack);
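/*
 * Sketch (illustrative fragment, compiled out): an input callback that
 * does not go through netlink_rcv_skb() must ack explicitly when the
 * sender asked for it; err == 0 acks success.
 */
#if 0
	if (nlh->nlmsg_flags & NLM_F_ACK)
		netlink_ack(skb, nlh, err);
#endif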
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002874int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002875 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002876{
Thomas Graf82ace472005-11-10 02:25:53 +01002877 struct nlmsghdr *nlh;
2878 int err;
2879
2880 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002881 int msglen;
2882
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002883 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002884 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002885
Martin Murrayad8e4b72006-01-10 13:02:29 -08002886 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002887 return 0;
2888
Thomas Grafd35b6852007-03-22 23:28:46 -07002889 /* Only requests are handled by the kernel */
2890 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002891 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002892
Thomas Graf45e7ae72007-03-22 23:29:10 -07002893 /* Skip control messages */
2894 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002895 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002896
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002897 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002898 if (err == -EINTR)
2899 goto skip;
2900
2901ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002902 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002903 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002904
Denis V. Lunev5c582982007-10-23 20:29:25 -07002905skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002906 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002907 if (msglen > skb->len)
2908 msglen = skb->len;
2909 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002910 }
2911
2912 return 0;
2913}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002914EXPORT_SYMBOL(netlink_rcv_skb);
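/*
 * Sketch (illustrative only, compiled out): the usual pattern is for the
 * kernel socket's input callback to hand the skb to netlink_rcv_skb()
 * with a per-message dispatcher; errors returned by the dispatcher land
 * in netlink_ack() above.  TEST_CMD and test_doit() are hypothetical.
 */
#if 0
static int test_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	switch (nlh->nlmsg_type) {
	case TEST_CMD:
		return test_doit(skb, nlh);
	default:
		return -EINVAL;
	}
}

static void test_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &test_rcv_msg);
}
#endif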
Thomas Graf82ace472005-11-10 02:25:53 +01002915
2916/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002917 * nlmsg_notify - send a notification netlink message
2918 * @sk: netlink socket to use
2919 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002920 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002921 * @group: destination multicast group or 0
2922 * @report: 1 to report back, 0 to disable
2923 * @flags: allocation flags
2924 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002925int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002926 unsigned int group, int report, gfp_t flags)
2927{
2928 int err = 0;
2929
2930 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002931 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002932
2933 if (report) {
2934 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002935 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002936 }
2937
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002938 /* errors are reported via the destination sk->sk_err, but delivery
2939 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002940 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002941 }
2942
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002943 if (report) {
2944 int err2;
2945
Eric W. Biederman15e47302012-09-07 20:12:54 +00002946 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002947 if (!err || err == -ESRCH)
2948 err = err2;
2949 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07002950
2951 return err;
2952}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002953EXPORT_SYMBOL(nlmsg_notify);
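/*
 * Sketch (illustrative fragment, compiled out): rtnetlink-style event
 * reporting, multicasting to a group and optionally echoing back to the
 * requester.  TEST_GRP, req_skb and test_nl_sock are hypothetical.
 */
#if 0
	err = nlmsg_notify(test_nl_sock, skb, NETLINK_CB(req_skb).portid,
			   TEST_GRP, nlh->nlmsg_flags & NLM_F_ECHO,
			   GFP_KERNEL);
#endif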
Thomas Grafd387f6a2006-08-15 00:31:06 -07002954
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955#ifdef CONFIG_PROC_FS
2956struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08002957 struct seq_net_private p;
Herbert Xu56d28b12015-02-04 07:33:24 +11002958 struct rhashtable_iter hti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 int link;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960};
2961
Herbert Xu56d28b12015-02-04 07:33:24 +11002962static int netlink_walk_start(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963{
Herbert Xu56d28b12015-02-04 07:33:24 +11002964 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965
Herbert Xu56d28b12015-02-04 07:33:24 +11002966 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
2967 if (err) {
2968 iter->link = MAX_LINKS;
2969 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 }
Herbert Xu56d28b12015-02-04 07:33:24 +11002971
2972 err = rhashtable_walk_start(&iter->hti);
2973 return err == -EAGAIN ? 0 : err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974}
2975
Herbert Xu56d28b12015-02-04 07:33:24 +11002976static void netlink_walk_stop(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977{
Herbert Xu56d28b12015-02-04 07:33:24 +11002978 rhashtable_walk_stop(&iter->hti);
2979 rhashtable_walk_exit(&iter->hti);
2980}
2981
2982static void *__netlink_seq_next(struct seq_file *seq)
2983{
2984 struct nl_seq_iter *iter = seq->private;
2985 struct netlink_sock *nlk;
2986
2987 do {
2988 for (;;) {
2989 int err;
2990
2991 nlk = rhashtable_walk_next(&iter->hti);
2992
2993 if (IS_ERR(nlk)) {
2994 if (PTR_ERR(nlk) == -EAGAIN)
2995 continue;
2996
2997 return nlk;
2998 }
2999
3000 if (nlk)
3001 break;
3002
3003 netlink_walk_stop(iter);
3004 if (++iter->link >= MAX_LINKS)
3005 return NULL;
3006
3007 err = netlink_walk_start(iter);
3008 if (err)
3009 return ERR_PTR(err);
3010 }
3011 } while (sock_net(&nlk->sk) != seq_file_net(seq));
3012
3013 return nlk;
3014}
3015
3016static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3017{
3018 struct nl_seq_iter *iter = seq->private;
3019 void *obj = SEQ_START_TOKEN;
3020 loff_t pos;
3021 int err;
3022
3023 iter->link = 0;
3024
3025 err = netlink_walk_start(iter);
3026 if (err)
3027 return ERR_PTR(err);
3028
3029 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3030 obj = __netlink_seq_next(seq);
3031
3032 return obj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033}
3034
3035static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3036{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 ++*pos;
Herbert Xu56d28b12015-02-04 07:33:24 +11003038 return __netlink_seq_next(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039}
3040
3041static void netlink_seq_stop(struct seq_file *seq, void *v)
3042{
Herbert Xu56d28b12015-02-04 07:33:24 +11003043 struct nl_seq_iter *iter = seq->private;
3044
3045 if (iter->link >= MAX_LINKS)
3046 return;
3047
3048 netlink_walk_stop(iter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049}
3050
3051
3052static int netlink_seq_show(struct seq_file *seq, void *v)
3053{
Eric Dumazet658cb352012-04-22 21:30:21 +00003054 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 seq_puts(seq,
3056 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003057 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00003058 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 struct sock *s = v;
3060 struct netlink_sock *nlk = nlk_sk(s);
3061
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003062 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 s,
3064 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003065 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07003066 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07003067 sk_rmem_alloc_get(s),
3068 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003069 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07003070 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003071 atomic_read(&s->sk_drops),
3072 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 );
3074
3075 }
3076 return 0;
3077}
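/*
 * Example /proc/net/netlink line (values illustrative; %pK may print the
 * socket pointer as zeros for unprivileged readers):
 *
 * sk       Eth Pid    Groups   Rmem  Wmem  Dump Locks Drops Inode
 * ffff8800c0e0b800 0   4196012 00000111 0     0     0    2     0     18282
 */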
3078
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003079static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 .start = netlink_seq_start,
3081 .next = netlink_seq_next,
3082 .stop = netlink_seq_stop,
3083 .show = netlink_seq_show,
3084};
3085
3086
3087static int netlink_seq_open(struct inode *inode, struct file *file)
3088{
Denis V. Luneve372c412007-11-19 22:31:54 -08003089 return seq_open_net(inode, file, &netlink_seq_ops,
3090 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003091}
3092
Arjan van de Venda7071d2007-02-12 00:55:36 -08003093static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 .owner = THIS_MODULE,
3095 .open = netlink_seq_open,
3096 .read = seq_read,
3097 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003098 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099};
3100
3101#endif
3102
3103int netlink_register_notifier(struct notifier_block *nb)
3104{
Alan Sterne041c682006-03-27 01:16:30 -08003105 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003107EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108
3109int netlink_unregister_notifier(struct notifier_block *nb)
3110{
Alan Sterne041c682006-03-27 01:16:30 -08003111 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003113EXPORT_SYMBOL(netlink_unregister_notifier);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003114
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003115static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 .family = PF_NETLINK,
3117 .owner = THIS_MODULE,
3118 .release = netlink_release,
3119 .bind = netlink_bind,
3120 .connect = netlink_connect,
3121 .socketpair = sock_no_socketpair,
3122 .accept = sock_no_accept,
3123 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003124 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 .ioctl = sock_no_ioctl,
3126 .listen = sock_no_listen,
3127 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003128 .setsockopt = netlink_setsockopt,
3129 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 .sendmsg = netlink_sendmsg,
3131 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003132 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 .sendpage = sock_no_sendpage,
3134};
3135
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003136static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 .family = PF_NETLINK,
3138 .create = netlink_create,
3139 .owner = THIS_MODULE, /* for consistency 8) */
3140};
3141
Pavel Emelyanov46650792007-10-08 20:38:39 -07003142static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003143{
3144#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003145 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003146 return -ENOMEM;
3147#endif
3148 return 0;
3149}
3150
Pavel Emelyanov46650792007-10-08 20:38:39 -07003151static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003152{
3153#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003154 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003155#endif
3156}
3157
David S. Millerb963ea82010-08-30 19:08:01 -07003158static void __init netlink_add_usersock_entry(void)
3159{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003160 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003161 int groups = 32;
3162
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003163 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003164 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003165 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003166
3167 netlink_table_grab();
3168
3169 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003170 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003171 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3172 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003173 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003174
3175 netlink_table_ungrab();
3176}
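/*
 * Sketch (illustrative only, compiled out): because of
 * NL_CFG_F_NONROOT_SEND above, an unprivileged process may send on
 * NETLINK_USERSOCK.  This is userspace code, shown here only to
 * demonstrate the effect of the flag.
 */
#if 0
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
#endif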
3177
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003178static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003179 .init = netlink_net_init,
3180 .exit = netlink_net_exit,
3181};
3182
Patrick McHardy49f7b332015-03-25 13:07:45 +00003183static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
Herbert Xuc428ecd2015-03-20 21:57:01 +11003184{
3185 const struct netlink_sock *nlk = data;
3186 struct netlink_compare_arg arg;
3187
Herbert Xud4862362015-09-22 11:38:56 +08003188 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
Herbert Xu11b58ba2015-03-24 00:50:22 +11003189 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
Herbert Xuc428ecd2015-03-20 21:57:01 +11003190}
3191
3192static const struct rhashtable_params netlink_rhashtable_params = {
3193 .head_offset = offsetof(struct netlink_sock, node),
3194 .key_len = netlink_compare_arg_len,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003195 .obj_hashfn = netlink_hash,
3196 .obj_cmpfn = netlink_compare,
Thomas Grafb5e2c152015-03-24 20:42:19 +00003197 .automatic_shrinking = true,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003198};
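/*
 * Sketch (illustrative fragment, compiled out): with these parameters a
 * socket lookup builds the same (net, portid) key used by netlink_hash()
 * above and hands it to the rhashtable, roughly as __netlink_lookup()
 * does earlier in this file.
 */
#if 0
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	nlk = rhashtable_lookup_fast(&table->hash, &arg,
				     netlink_rhashtable_params);
#endif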
3199
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200static int __init netlink_proto_init(void)
3201{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 int err = proto_register(&netlink_proto, 0);
3204
3205 if (err != 0)
3206 goto out;
3207
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003208 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003210 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003211 if (!nl_table)
3212 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214 for (i = 0; i < MAX_LINKS; i++) {
Herbert Xuc428ecd2015-03-20 21:57:01 +11003215 if (rhashtable_init(&nl_table[i].hash,
3216 &netlink_rhashtable_params) < 0) {
Thomas Grafe3416942014-08-02 11:47:45 +02003217 while (--i >= 0)
3218 rhashtable_destroy(&nl_table[i].hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003220 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222 }
3223
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003224 INIT_LIST_HEAD(&netlink_tap_all);
3225
David S. Millerb963ea82010-08-30 19:08:01 -07003226 netlink_add_usersock_entry();
3227
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003229 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003230 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231 rtnetlink_init();
3232out:
3233 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003234panic:
3235 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236}
3237
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238core_initcall(netlink_proto_init);