/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
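
/* Illustrative sketch (not from this file): a pure reader walking the
 * device list under RCU, per the locking rules above.  The function name
 * my_count_up_devices() is hypothetical.
 *
 *	static int my_count_up_devices(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			if (dev->flags & IFF_UP)
 *				n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */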

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and protocol handlers that
 *	only inspect packets MUST start from the promiscuous ptype_all
 *	chain in net_bh. It is true now, do not change it.
 *	Explanation follows: if a packet-mangling protocol handler is
 *	first on the list, it cannot sense that the packet is cloned and
 *	should be copied-on-write, so it will change the packet in place
 *	and subsequent readers will get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
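
/* Illustrative sketch (not from this file): registering a minimal tap for
 * IPv4 frames.  The my_* names are hypothetical; a handler registered this
 * way must stay allocated until dev_remove_pack() has returned.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// a real handler would inspect skb here
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	// e.g. in module init
 *	dev_remove_pack(&my_ptype);	// e.g. in module exit
 */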

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
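
/* Illustrative sketch (not from this file): registering GRO callbacks for
 * an ethertype.  Field names follow struct packet_offload as used by this
 * code; the my_* callbacks are hypothetical and assumed defined elsewhere.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gro_receive	= my_gro_receive,
 *			.gro_complete	= my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */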

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	common to all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

611 * Saves at boot time configured settings for any netdevice.
612 */
613int __init netdev_boot_setup(char *str)
614{
615 int ints[5];
616 struct ifmap map;
617
618 str = get_options(str, ARRAY_SIZE(ints), ints);
619 if (!str || !*str)
620 return 0;
621
622 /* Save settings */
623 memset(&map, 0, sizeof(map));
624 if (ints[0] > 0)
625 map.irq = ints[1];
626 if (ints[0] > 1)
627 map.base_addr = ints[2];
628 if (ints[0] > 2)
629 map.mem_start = ints[3];
630 if (ints[0] > 3)
631 map.mem_end = ints[4];
632
633 /* Add new entry to the list */
634 return netdev_boot_setup_add(str, &map);
635}
636
637__setup("netdev=", netdev_boot_setup);
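
/* Illustrative sketch (not from this file): with the parser above, a kernel
 * command line entry such as
 *
 *	netdev=5,0x340,0xd0000,0xd4000,eth0
 *
 * records irq 5, I/O base 0x340 and the memory window for "eth0", to be
 * picked up later by netdev_boot_setup_check().  The numbers shown are
 * made up for illustration.
 */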

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
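
/* Illustrative sketch (not from this file): the refcounted lookup is the
 * safe default outside RCU/RTNL context; the reference must be dropped
 * with dev_put() when done.  "eth0" is just an example name.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */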

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
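
/* Illustrative examples (not from this file), following the checks above:
 *
 *	dev_valid_name("eth0")		-> true
 *	dev_valid_name("my device")	-> false  (whitespace)
 *	dev_valid_name("a/b")		-> false  ('/' would break sysfs paths)
 *	dev_valid_name("..")		-> false  (reserved directory name)
 */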

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf: scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
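
/* Illustrative sketch (not from this file): a driver that wants devices
 * named "foo0", "foo1", ... passes a "%d" format string while holding the
 * rtnl lock.  The foo%d name is made up.
 *
 *	rtnl_lock();
 *	err = dev_alloc_name(dev, "foo%d");	// err < 0 on failure,
 *	rtnl_unlock();				// else the unit number
 *	// on success dev->name now holds e.g. "foo0"
 */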

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	ret = netpoll_rx_disable(dev);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_rx_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
Patrick McHardybd380812010-02-26 06:34:53 +00001266
1267/**
1268 * dev_open - prepare an interface for use.
1269 * @dev: device to open
1270 *
1271 * Takes a device from down to up state. The device's private open
1272 * function is invoked and then the multicast lists are loaded. Finally
1273 * the device is moved into the up state and a %NETDEV_UP message is
1274 * sent to the netdev notifier chain.
1275 *
1276 * Calling this function on an active interface is a nop. On a failure
1277 * a negative errno code is returned.
1278 */
1279int dev_open(struct net_device *dev)
1280{
1281 int ret;
1282
Patrick McHardybd380812010-02-26 06:34:53 +00001283 if (dev->flags & IFF_UP)
1284 return 0;
1285
Patrick McHardybd380812010-02-26 06:34:53 +00001286 ret = __dev_open(dev);
1287 if (ret < 0)
1288 return ret;
1289
Patrick McHardybd380812010-02-26 06:34:53 +00001290 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1291 call_netdevice_notifiers(NETDEV_UP, dev);
1292
1293 return ret;
1294}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001295EXPORT_SYMBOL(dev_open);
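
/*
 * Usage sketch (illustrative only, not part of this file): dev_open() and
 * dev_close() must run under RTNL, as the ASSERT_RTNL() in __dev_open()
 * enforces, so an external caller would wrap them like this.
 * "example_dev" is a hypothetical struct net_device pointer.
 */
#if 0
	int err;

	rtnl_lock();
	err = dev_open(example_dev);	/* nop if already IFF_UP */
	if (err)
		pr_err("failed to bring device up: %d\n", err);
	rtnl_unlock();
#endif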

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch the poll
		 * list, it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP.
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	/* Temporarily disable netpoll until the interface is down */
	retval = netpoll_rx_disable(dev);
	if (retval)
		return retval;

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	netpoll_rx_enable(dev);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	int ret = 0;

	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		/* Block netpoll rx while the interface is going down */
		ret = netpoll_rx_disable(dev);
		if (ret)
			return ret;

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);

		netpoll_rx_enable(dev);
	}
	return ret;
}
EXPORT_SYMBOL(dev_close);

/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
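
/*
 * Usage sketch (illustrative only): code that starts forwarding packets
 * received on a device, e.g. when enslaving it to a bridge-like master,
 * would disable LRO first, under RTNL.  "example_dev" is hypothetical.
 */
#if 0
	ASSERT_RTNL();
	dev_disable_lro(example_dev);	/* LRO-merged skbs must not be forwarded */
#endif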

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to give it a race-free view of the network
 * device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
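
/*
 * Usage sketch (illustrative only): a minimal notifier as a module might
 * register it.  Note that on this kernel the notifier's third argument is
 * the struct net_device pointer itself.  All "example_*" names are
 * hypothetical.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is about to go down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

/* in module init/exit:
 *	register_netdevice_notifier(&example_notifier);
 *	unregister_netdevice_notifier(&example_notifier);
 */
#endif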

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list and delivered to the removed
 * notifier, removing the need for special-case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
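
/*
 * Usage sketch (illustrative only): a veth-style pair driver forwarding
 * every transmitted frame into its peer's receive path.  "example_priv"
 * and its "peer" member are hypothetical.
 */
#if 0
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* dev_forward_skb() consumes the skb on both success and drop */
	dev_forward_skb(priv->peer, skb);
	return NETDEV_TX_OK;
}
#endif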

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* The network header should already be set
			 * correctly by the sender, so the check below is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so disable priority mappings. It
 * is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
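
/*
 * Usage sketch (illustrative only): the driver-side setup this function
 * guards.  A driver with 8 tx queues might carve them into two traffic
 * classes before (re)setting real_num_tx_queues.
 */
#if 0
	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0 -> queues 0..3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1 -> queues 4..7 */
	netdev_set_prio_tc_map(dev, 0, 0);	/* priority 0 -> TC0 */
	netdev_set_prio_tc_map(dev, 1, 1);	/* priority 1 -> TC1 */
#endif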

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
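
/*
 * Usage sketch (illustrative only): a driver pinning each tx queue to a
 * single CPU.  The mask parameter is not const on this kernel, so a
 * temporary cpumask is built per queue.
 */
#if 0
	cpumask_var_t mask;
	int queue, cpu;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	for (queue = 0; queue < dev->real_num_tx_queues; queue++) {
		cpu = queue % num_online_cpus();
		cpumask_clear(mask);
		cpumask_set_cpu(cpu, mask);
		netif_set_xps_queue(dev, mask, queue);
	}
	free_cpumask_var(mask);
#endif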

#endif
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
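
/*
 * Usage sketch (illustrative only): the classic detach/attach pairing in
 * a PCI driver's suspend and resume paths.  All "example_*" names are
 * hypothetical.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stop the tx queues first */
	/* ... quiesce hardware, save state ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... restore hardware state ... */
	netif_device_attach(dev);	/* restart queues and watchdog */
	return 0;
}
#endif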

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
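
/*
 * Usage sketch (illustrative only): a start_xmit path falling back to
 * software checksumming when the hardware cannot offload this protocol.
 * "example_hw_can_csum()" and the "drop" label are hypothetical.
 */
#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !example_hw_can_csum(skb)) {
		if (skb_checksum_help(skb))
			goto drop;	/* checksum could not be computed */
	}
#endif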

__be16 skb_network_protocol(struct sk_buff *skb)
{
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return 0;

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	return type;
}

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb_network_protocol(skb);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, skb->mac_len);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				int err;

				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);
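
/*
 * Usage sketch (illustrative only): segmenting a GSO skb by hand through
 * the skb_gso_segment() wrapper (which calls __skb_gso_segment() with
 * tx_path == true) and walking the resulting ->next chain.  The "drop"
 * label is hypothetical.
 */
#if 0
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, dev->features);
	if (IS_ERR(segs))
		goto drop;
	if (segs) {
		consume_skb(skb);
		while (segs) {
			nskb = segs;
			segs = segs->next;
			nskb->next = NULL;
			/* ... transmit nskb ... */
		}
	}
#endif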

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    __be16 protocol,
					    const struct net_device *dev,
					    netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
					 const struct net_device *dev)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = dev->features;

	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, dev, features);
	}

	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
		     NETIF_F_HW_VLAN_STAG_TX);

	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
		return harmonize_features(skb, protocol, dev, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
			    NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
		return harmonize_features(skb, protocol, dev, features);
	}
}
EXPORT_SYMBOL(netif_skb_dev_features);

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) &&
		 !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags &&
		 !(features & NETIF_F_SG)));
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;
	unsigned int skb_len;

	if (likely(!skb->next)) {
		netdev_features_t features;

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
			skb = __vlan_put_tag(skb, skb->vlan_proto,
					     vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		/* If encapsulation offload request, verify we are testing
		 * hardware encapsulation features instead of standard
		 * features for the netdev
		 */
		if (skb->encapsulation)
			features &= dev->hw_enc_features;

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (skb->encapsulation)
					skb_set_inner_transport_header(skb,
						skb_checksum_start_offset(skb));
				else
					skb_set_transport_header(skb,
						skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				    skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_len = skb->len;
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL)) {
		skb->destructor = DEV_GSO_CB(skb)->destructor;
		consume_skb(skb);
		return rc;
	}
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002627}
2628
Eric Dumazet1def9232013-01-10 12:36:42 +00002629static void qdisc_pkt_len_init(struct sk_buff *skb)
2630{
2631 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2632
2633 qdisc_skb_cb(skb)->pkt_len = skb->len;
2634
2635	/* To get a more precise estimate of the bytes sent on the wire,
2636	 * we add the header size of every segment to pkt_len.
2637 */
2638 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002639 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002640 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002641
Eric Dumazet757b8b12013-01-15 21:14:21 -08002642 /* mac layer + network layer */
2643 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2644
2645 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002646 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2647 hdr_len += tcp_hdrlen(skb);
2648 else
2649 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002650
2651 if (shinfo->gso_type & SKB_GSO_DODGY)
2652 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2653 shinfo->gso_size);
2654
2655 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002656 }
2657}
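/* Worked example (illustrative numbers, not from this file): a TCP GSO
 * skb with skb->len = 65226, gso_size = 1448 and hdr_len = 66 (14 bytes
 * ethernet + 20 bytes IPv4 + 32 bytes TCP with timestamps) carries
 * gso_segs = 45, so pkt_len becomes 65226 + 44 * 66 = 68130, which is
 * exactly the 45 * (1448 + 66) bytes that actually hit the wire.
 */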
2658
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002659static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2660 struct net_device *dev,
2661 struct netdev_queue *txq)
2662{
2663 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002664 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002665 int rc;
2666
Eric Dumazet1def9232013-01-10 12:36:42 +00002667 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002668 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002669 /*
2670	 * Heuristic to force contended enqueues to serialize on a
2671	 * separate lock before trying to take the qdisc's main lock.
2672	 * This permits the __QDISC_STATE_RUNNING owner to take the lock more
2673	 * often and dequeue packets faster.
2674 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002675 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002676 if (unlikely(contended))
2677 spin_lock(&q->busylock);
2678
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002679 spin_lock(root_lock);
2680 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2681 kfree_skb(skb);
2682 rc = NET_XMIT_DROP;
2683 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002684 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002685 /*
2686 * This is a work-conserving queue; there are no old skbs
2687 * waiting to be sent out; and the qdisc is not running -
2688 * xmit the skb directly.
2689 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002690 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2691 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002692
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002693 qdisc_bstats_update(q, skb);
2694
Eric Dumazet79640a42010-06-02 05:09:29 -07002695 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2696 if (unlikely(contended)) {
2697 spin_unlock(&q->busylock);
2698 contended = false;
2699 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002700 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002701 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002702 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002703
2704 rc = NET_XMIT_SUCCESS;
2705 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002706 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002707 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002708 if (qdisc_run_begin(q)) {
2709 if (unlikely(contended)) {
2710 spin_unlock(&q->busylock);
2711 contended = false;
2712 }
2713 __qdisc_run(q);
2714 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002715 }
2716 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002717 if (unlikely(contended))
2718 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002719 return rc;
2720}
2721
Neil Horman5bc14212011-11-22 05:10:51 +00002722#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2723static void skb_update_prio(struct sk_buff *skb)
2724{
Igor Maravic6977a792011-11-25 07:44:54 +00002725 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002726
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002727 if (!skb->priority && skb->sk && map) {
2728 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2729
2730 if (prioidx < map->priomap_len)
2731 skb->priority = map->priomap[prioidx];
2732 }
Neil Horman5bc14212011-11-22 05:10:51 +00002733}
2734#else
2735#define skb_update_prio(skb)
2736#endif
2737
Eric Dumazet745e20f2010-09-29 13:23:09 -07002738static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002739#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002740
Dave Jonesd29f7492008-07-22 14:09:06 -07002741/**
Michel Machado95603e22012-06-12 10:16:35 +00002742 * dev_loopback_xmit - loop back @skb
2743 * @skb: buffer to transmit
2744 */
2745int dev_loopback_xmit(struct sk_buff *skb)
2746{
2747 skb_reset_mac_header(skb);
2748 __skb_pull(skb, skb_network_offset(skb));
2749 skb->pkt_type = PACKET_LOOPBACK;
2750 skb->ip_summed = CHECKSUM_UNNECESSARY;
2751 WARN_ON(!skb_dst(skb));
2752 skb_dst_force(skb);
2753 netif_rx_ni(skb);
2754 return 0;
2755}
2756EXPORT_SYMBOL(dev_loopback_xmit);
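/* Illustrative sketch (modelled on the IPv4 multicast output path, not
 * code from this file): dev_loopback_xmit() is typically used as the
 * okfn of an NF_HOOK invocation when a copy of an outgoing packet must
 * be delivered back to local listeners, roughly:
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb, NULL,
 *		newskb->dev, dev_loopback_xmit);
 */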
2757
2758/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002759 * dev_queue_xmit - transmit a buffer
2760 * @skb: buffer to transmit
2761 *
2762 * Queue a buffer for transmission to a network device. The caller must
2763 * have set the device and priority and built the buffer before calling
2764 * this function. The function can be called from an interrupt.
2765 *
2766 * A negative errno code is returned on a failure. A success does not
2767 * guarantee the frame will be transmitted as it may be dropped due
2768 * to congestion or traffic shaping.
2769 *
2770 * -----------------------------------------------------------------------------------
2771 * I notice this method can also return errors from the queue disciplines,
2772 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2773 * be positive.
2774 *
2775 * Regardless of the return value, the skb is consumed, so it is currently
2776 * difficult to retry a send to this method. (You can bump the ref count
2777 * before sending to hold a reference for retry if you are careful.)
2778 *
2779 * When calling this method, interrupts MUST be enabled. This is because
2780 * the BH enable code must have IRQs enabled so that it will not deadlock.
2781 * --BLG
2782 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783int dev_queue_xmit(struct sk_buff *skb)
2784{
2785 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002786 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 struct Qdisc *q;
2788 int rc = -ENOMEM;
2789
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002790 skb_reset_mac_header(skb);
2791
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002792 /* Disable soft irqs for various locks below. Also
2793 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002795 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796
Neil Horman5bc14212011-11-22 05:10:51 +00002797 skb_update_prio(skb);
2798
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002799 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002800 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002801
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002803 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002805 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002807 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002808 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 }
2810
2811	/* The device has no queue. Common case for software devices:
2812	   loopback, all sorts of tunnels...
2813
Herbert Xu932ff272006-06-09 12:20:56 -07002814	   Really, it is unlikely that netif_tx_lock protection is necessary
2815	   here. (f.e. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816	   counters.)
2817	   However, it is possible that they rely on the protection
2818	   we provide here.
2819
2820	   Check this and take the lock: it is not prone to deadlocks.
2821	   Alternatively, shoot the noqueue qdisc entirely; that is even simpler 8)
2822 */
2823 if (dev->flags & IFF_UP) {
2824 int cpu = smp_processor_id(); /* ok because BHs are off */
2825
David S. Millerc773e842008-07-08 23:13:53 -07002826 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827
Eric Dumazet745e20f2010-09-29 13:23:09 -07002828 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2829 goto recursion_alert;
2830
David S. Millerc773e842008-07-08 23:13:53 -07002831 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832
Tom Herbert734664982011-11-28 16:32:44 +00002833 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002834 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002835 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002836 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002837 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002838 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 goto out;
2840 }
2841 }
David S. Millerc773e842008-07-08 23:13:53 -07002842 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002843 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2844 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 } else {
2846			/* Recursion has been detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002847			 * unfortunately.
2848 */
2849recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002850 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2851 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
2853 }
2854
2855 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002856 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 kfree_skb(skb);
2859 return rc;
2860out:
Herbert Xud4828d82006-06-22 02:28:18 -07002861 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 return rc;
2863}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002864EXPORT_SYMBOL(dev_queue_xmit);
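/* Illustrative sketch (hypothetical caller, not code from this file):
 * after building the skb and setting skb->dev, the caller hands the
 * buffer off and must not touch it again, whatever the return value:
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	rc = dev_queue_xmit(skb);
 *	if (net_xmit_eval(rc))
 *		pr_debug("xmit failed: %d\n", rc);
 *
 * net_xmit_eval() folds NET_XMIT_CN into success, which matches the
 * note above about positive qdisc return codes.
 */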
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865
2866
2867/*=======================================================================
2868 Receiver routines
2869 =======================================================================*/
2870
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002871int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002872EXPORT_SYMBOL(netdev_max_backlog);
2873
Eric Dumazet3b098e22010-05-15 23:57:10 -07002874int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002875int netdev_budget __read_mostly = 300;
2876int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002878/* Called with irq disabled */
2879static inline void ____napi_schedule(struct softnet_data *sd,
2880 struct napi_struct *napi)
2881{
2882 list_add_tail(&napi->poll_list, &sd->poll_list);
2883 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2884}
2885
Eric Dumazetdf334542010-03-24 19:13:54 +00002886#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002887
2888/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002889struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002890EXPORT_SYMBOL(rps_sock_flow_table);
2891
Ingo Molnarc5905af2012-02-24 08:31:31 +01002892struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002893
Ben Hutchingsc4454772011-01-19 11:03:53 +00002894static struct rps_dev_flow *
2895set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2896 struct rps_dev_flow *rflow, u16 next_cpu)
2897{
Ben Hutchings09994d12011-10-03 04:42:46 +00002898 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002899#ifdef CONFIG_RFS_ACCEL
2900 struct netdev_rx_queue *rxqueue;
2901 struct rps_dev_flow_table *flow_table;
2902 struct rps_dev_flow *old_rflow;
2903 u32 flow_id;
2904 u16 rxq_index;
2905 int rc;
2906
2907 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002908 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2909 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00002910 goto out;
2911 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2912 if (rxq_index == skb_get_rx_queue(skb))
2913 goto out;
2914
2915 rxqueue = dev->_rx + rxq_index;
2916 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2917 if (!flow_table)
2918 goto out;
2919 flow_id = skb->rxhash & flow_table->mask;
2920 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2921 rxq_index, flow_id);
2922 if (rc < 0)
2923 goto out;
2924 old_rflow = rflow;
2925 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00002926 rflow->filter = rc;
2927 if (old_rflow->filter == rflow->filter)
2928 old_rflow->filter = RPS_NO_FILTER;
2929 out:
2930#endif
2931 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00002932 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002933 }
2934
Ben Hutchings09994d12011-10-03 04:42:46 +00002935 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002936 return rflow;
2937}
2938
Tom Herbert0a9627f2010-03-16 08:03:29 +00002939/*
2940 * get_rps_cpu is called from netif_receive_skb and returns the target
2941 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002942 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002943 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002944static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2945 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002946{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002947 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002948 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002949 struct rps_dev_flow_table *flow_table;
2950 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002951 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07002952 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002953
Tom Herbert0a9627f2010-03-16 08:03:29 +00002954 if (skb_rx_queue_recorded(skb)) {
2955 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002956 if (unlikely(index >= dev->real_num_rx_queues)) {
2957 WARN_ONCE(dev->real_num_rx_queues > 1,
2958 "%s received packet on queue %u, but number "
2959 "of RX queues is %u\n",
2960 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002961 goto done;
2962 }
2963 rxqueue = dev->_rx + index;
2964 } else
2965 rxqueue = dev->_rx;
2966
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002967 map = rcu_dereference(rxqueue->rps_map);
2968 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08002969 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00002970 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00002971 tcpu = map->cpus[0];
2972 if (cpu_online(tcpu))
2973 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002974 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00002975 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00002976 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00002977 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002978 }
2979
Changli Gao2d47b452010-08-17 19:00:56 +00002980 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002981 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00002982 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002983
Tom Herbertfec5e652010-04-16 16:01:27 -07002984 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2985 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2986 if (flow_table && sock_flow_table) {
2987 u16 next_cpu;
2988 struct rps_dev_flow *rflow;
2989
2990 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2991 tcpu = rflow->cpu;
2992
2993 next_cpu = sock_flow_table->ents[skb->rxhash &
2994 sock_flow_table->mask];
2995
2996 /*
2997 * If the desired CPU (where last recvmsg was done) is
2998 * different from current CPU (one in the rx-queue flow
2999 * table entry), switch if one of the following holds:
3000 * - Current CPU is unset (equal to RPS_NO_CPU).
3001 * - Current CPU is offline.
3002 * - The current CPU's queue tail has advanced beyond the
3003 * last packet that was enqueued using this table entry.
3004 * This guarantees that all previous packets for the flow
3005 * have been dequeued, thus preserving in order delivery.
3006 */
3007 if (unlikely(tcpu != next_cpu) &&
3008 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3009 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003010 rflow->last_qtail)) >= 0)) {
3011 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003012 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003013 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003014
Tom Herbertfec5e652010-04-16 16:01:27 -07003015 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3016 *rflowp = rflow;
3017 cpu = tcpu;
3018 goto done;
3019 }
3020 }
3021
Tom Herbert0a9627f2010-03-16 08:03:29 +00003022 if (map) {
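		/* Scale the 32-bit hash uniformly onto [0, map->len)
		 * without a division: the multiply-shift below computes
		 * (rxhash / 2^32) * len in fixed point.
		 */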
Tom Herbertfec5e652010-04-16 16:01:27 -07003023 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003024
3025 if (cpu_online(tcpu)) {
3026 cpu = tcpu;
3027 goto done;
3028 }
3029 }
3030
3031done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003032 return cpu;
3033}
3034
Ben Hutchingsc4454772011-01-19 11:03:53 +00003035#ifdef CONFIG_RFS_ACCEL
3036
3037/**
3038 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3039 * @dev: Device on which the filter was set
3040 * @rxq_index: RX queue index
3041 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3042 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3043 *
3044 * Drivers that implement ndo_rx_flow_steer() should periodically call
3045 * this function for each installed filter and remove the filters for
3046 * which it returns %true.
3047 */
3048bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3049 u32 flow_id, u16 filter_id)
3050{
3051 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3052 struct rps_dev_flow_table *flow_table;
3053 struct rps_dev_flow *rflow;
3054 bool expire = true;
3055 int cpu;
3056
3057 rcu_read_lock();
3058 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3059 if (flow_table && flow_id <= flow_table->mask) {
3060 rflow = &flow_table->flows[flow_id];
3061 cpu = ACCESS_ONCE(rflow->cpu);
3062 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3063 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3064 rflow->last_qtail) <
3065 (int)(10 * flow_table->mask)))
3066 expire = false;
3067 }
3068 rcu_read_unlock();
3069 return expire;
3070}
3071EXPORT_SYMBOL(rps_may_expire_flow);
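/* Illustrative sketch (hypothetical driver state, not code from this
 * file): a driver implementing ndo_rx_flow_steer() might age out its
 * installed filters from a periodic work item like so:
 *
 *	for (i = 0; i < priv->filter_count; i++) {
 *		struct my_flow_filter *f = &priv->filters[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			my_remove_filter(priv, f);
 *	}
 *
 * priv, my_flow_filter and my_remove_filter are hypothetical names used
 * only for illustration.
 */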
3072
3073#endif /* CONFIG_RFS_ACCEL */
3074
Tom Herbert0a9627f2010-03-16 08:03:29 +00003075/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003076static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003077{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003078 struct softnet_data *sd = data;
3079
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003080 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003081 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003082}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003083
Tom Herbertfec5e652010-04-16 16:01:27 -07003084#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003085
3086/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003087 * Check whether this softnet_data structure belongs to another CPU.
3088 * If so, queue it on our IPI list and return 1;
3089 * otherwise, return 0.
3090 */
3091static int rps_ipi_queued(struct softnet_data *sd)
3092{
3093#ifdef CONFIG_RPS
3094 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3095
3096 if (sd != mysd) {
3097 sd->rps_ipi_next = mysd->rps_ipi_list;
3098 mysd->rps_ipi_list = sd;
3099
3100 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3101 return 1;
3102 }
3103#endif /* CONFIG_RPS */
3104 return 0;
3105}
3106
3107/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003108 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
3109 * queue (which may belong to a remote CPU).
3110 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003111static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3112 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003113{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003114 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003115 unsigned long flags;
3116
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003117 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003118
3119 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003120
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003121 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003122 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3123 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003124enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003125 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003126 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003127 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003128 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003129 return NET_RX_SUCCESS;
3130 }
3131
Eric Dumazetebda37c22010-05-06 23:51:21 +00003132		/* Schedule NAPI for the backlog device.
3133		 * We can use a non-atomic operation since we own the queue lock.
3134 */
3135 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003136 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003137 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003138 }
3139 goto enqueue;
3140 }
3141
Changli Gaodee42872010-05-02 05:42:16 +00003142 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003143 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003144
Tom Herbert0a9627f2010-03-16 08:03:29 +00003145 local_irq_restore(flags);
3146
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003147 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003148 kfree_skb(skb);
3149 return NET_RX_DROP;
3150}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152/**
3153 * netif_rx - post buffer to the network code
3154 * @skb: buffer to post
3155 *
3156 * This function receives a packet from a device driver and queues it for
3157 * the upper (protocol) levels to process. It always succeeds. The buffer
3158 * may be dropped during processing for congestion control or by the
3159 * protocol layers.
3160 *
3161 * return values:
3162 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 * NET_RX_DROP (packet was dropped)
3164 *
3165 */
3166
3167int netif_rx(struct sk_buff *skb)
3168{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003169 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170
3171 /* if netpoll wants it, pretend we never saw it */
3172 if (netpoll_rx(skb))
3173 return NET_RX_DROP;
3174
Eric Dumazet588f0332011-11-15 04:12:55 +00003175 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176
Koki Sanagicf66ba52010-08-23 18:45:02 +09003177 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003178#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003179 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003180 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003181 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182
Changli Gaocece1942010-08-07 20:35:43 -07003183 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003184 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003185
3186 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003187 if (cpu < 0)
3188 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003189
3190 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3191
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003192 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003193 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003194 } else
3195#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003196 {
3197 unsigned int qtail;
3198 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3199 put_cpu();
3200 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003201 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003203EXPORT_SYMBOL(netif_rx);
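/* Illustrative sketch (hypothetical non-NAPI driver, not code from this
 * file): a driver typically calls netif_rx() from its interrupt handler
 * after copying the frame out of its receive ring:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (unlikely(!skb))
 *		return;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * rx_buf and pkt_len are hypothetical driver state; netif_rx_ni() below
 * is the variant to use from process context.
 */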
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204
3205int netif_rx_ni(struct sk_buff *skb)
3206{
3207 int err;
3208
3209 preempt_disable();
3210 err = netif_rx(skb);
3211 if (local_softirq_pending())
3212 do_softirq();
3213 preempt_enable();
3214
3215 return err;
3216}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217EXPORT_SYMBOL(netif_rx_ni);
3218
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219static void net_tx_action(struct softirq_action *h)
3220{
3221 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3222
3223 if (sd->completion_queue) {
3224 struct sk_buff *clist;
3225
3226 local_irq_disable();
3227 clist = sd->completion_queue;
3228 sd->completion_queue = NULL;
3229 local_irq_enable();
3230
3231 while (clist) {
3232 struct sk_buff *skb = clist;
3233 clist = clist->next;
3234
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003235 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003236 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237 __kfree_skb(skb);
3238 }
3239 }
3240
3241 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003242 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243
3244 local_irq_disable();
3245 head = sd->output_queue;
3246 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003247 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 local_irq_enable();
3249
3250 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003251 struct Qdisc *q = head;
3252 spinlock_t *root_lock;
3253
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 head = head->next_sched;
3255
David S. Miller5fb66222008-08-02 20:02:43 -07003256 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003257 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003258 smp_mb__before_clear_bit();
3259 clear_bit(__QDISC_STATE_SCHED,
3260 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003261 qdisc_run(q);
3262 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003264 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003265 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003266 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003267 } else {
3268 smp_mb__before_clear_bit();
3269 clear_bit(__QDISC_STATE_SCHED,
3270 &q->state);
3271 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 }
3273 }
3274 }
3275}
3276
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003277#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3278 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003279/* This hook is defined here for ATM LANE */
3280int (*br_fdb_test_addr_hook)(struct net_device *dev,
3281 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003282EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003283#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285#ifdef CONFIG_NET_CLS_ACT
3286/* TODO: Maybe we should just force sch_ingress to be compiled in
3287 * whenever CONFIG_NET_CLS_ACT is? Otherwise we currently execute some
3288 * useless instructions (a compare and two extra stores) when
3289 * CONFIG_NET_CLS_ACT is set but the ingress scheduler is not in use.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003290 * NOTE: This doesn't stop any functionality; if you don't have
3291 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292 *
3293 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003294static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003297 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003298 int result = TC_ACT_OK;
3299 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003300
Stephen Hemmingerde384832010-08-01 00:33:23 -07003301 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003302 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3303 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003304 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305 }
3306
Herbert Xuf697c3e2007-10-14 00:38:47 -07003307 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3308 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3309
David S. Miller83874002008-07-17 00:53:03 -07003310 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003311 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003312 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003313 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3314 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003315 spin_unlock(qdisc_lock(q));
3316 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003317
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318 return result;
3319}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003320
3321static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3322 struct packet_type **pt_prev,
3323 int *ret, struct net_device *orig_dev)
3324{
Eric Dumazet24824a02010-10-02 06:11:55 +00003325 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3326
3327 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003328 goto out;
3329
3330 if (*pt_prev) {
3331 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3332 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003333 }
3334
Eric Dumazet24824a02010-10-02 06:11:55 +00003335 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003336 case TC_ACT_SHOT:
3337 case TC_ACT_STOLEN:
3338 kfree_skb(skb);
3339 return NULL;
3340 }
3341
3342out:
3343 skb->tc_verd = 0;
3344 return skb;
3345}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346#endif
3347
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003348/**
3349 * netdev_rx_handler_register - register receive handler
3350 * @dev: device to register a handler for
3351 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003352 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003353 *
3354 * Register a receive handler for a device. This handler will then be
3355 * called from __netif_receive_skb. A negative errno code is returned
3356 * on a failure.
3357 *
3358 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003359 *
3360 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003361 */
3362int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003363 rx_handler_func_t *rx_handler,
3364 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003365{
3366 ASSERT_RTNL();
3367
3368 if (dev->rx_handler)
3369 return -EBUSY;
3370
Eric Dumazet00cfec32013-03-29 03:01:22 +00003371 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003372 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003373 rcu_assign_pointer(dev->rx_handler, rx_handler);
3374
3375 return 0;
3376}
3377EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
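/* Illustrative sketch (hypothetical upper device, not code from this
 * file): a bridge- or bonding-like driver claims a slave's ingress path
 * under RTNL and tears it down symmetrically:
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(slave_dev, my_rx_handler, my_priv);
 *	if (err)
 *		return err;
 *	...
 *	netdev_rx_handler_unregister(slave_dev);
 *
 * my_rx_handler() and my_priv are hypothetical; the handler must return
 * one of the RX_HANDLER_* values dispatched in __netif_receive_skb_core().
 */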
3378
3379/**
3380 * netdev_rx_handler_unregister - unregister receive handler
3381 * @dev: device to unregister a handler from
3382 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003383 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003384 *
3385 * The caller must hold the rtnl_mutex.
3386 */
3387void netdev_rx_handler_unregister(struct net_device *dev)
3388{
3389
3390 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003391 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003392	/* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3393	 * section is guaranteed to see a non-NULL rx_handler_data
3394 * as well.
3395 */
3396 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003397 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003398}
3399EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3400
Mel Gormanb4b9e352012-07-31 16:44:26 -07003401/*
3402 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3403 * the special handling of PFMEMALLOC skbs.
3404 */
3405static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3406{
3407 switch (skb->protocol) {
3408 case __constant_htons(ETH_P_ARP):
3409 case __constant_htons(ETH_P_IP):
3410 case __constant_htons(ETH_P_IPV6):
3411 case __constant_htons(ETH_P_8021Q):
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003412 case __constant_htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003413 return true;
3414 default:
3415 return false;
3416 }
3417}
3418
David S. Miller9754e292013-02-14 15:57:38 -05003419static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420{
3421 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003422 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003423 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003424 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003425 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08003427 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428
Eric Dumazet588f0332011-11-15 04:12:55 +00003429 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003430
Koki Sanagicf66ba52010-08-23 18:45:02 +09003431 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003432
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003434 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003435 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003437 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003438
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003439 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003440 if (!skb_transport_header_was_set(skb))
3441 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003442 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443
3444 pt_prev = NULL;
3445
3446 rcu_read_lock();
3447
David S. Miller63d8ea72011-02-28 10:48:59 -08003448another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003449 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003450
3451 __this_cpu_inc(softnet_data.processed);
3452
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003453 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3454 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003455 skb = vlan_untag(skb);
3456 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003457 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003458 }
3459
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460#ifdef CONFIG_NET_CLS_ACT
3461 if (skb->tc_verd & TC_NCLS) {
3462 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3463 goto ncls;
3464 }
3465#endif
3466
David S. Miller9754e292013-02-14 15:57:38 -05003467 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003468 goto skip_taps;
3469
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003471 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003472 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003473 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474 pt_prev = ptype;
3475 }
3476 }
3477
Mel Gormanb4b9e352012-07-31 16:44:26 -07003478skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003480 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3481 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003482 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483ncls:
3484#endif
3485
David S. Miller9754e292013-02-14 15:57:38 -05003486 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003487 goto drop;
3488
John Fastabend24257172011-10-10 09:16:41 +00003489 if (vlan_tx_tag_present(skb)) {
3490 if (pt_prev) {
3491 ret = deliver_skb(skb, pt_prev, orig_dev);
3492 pt_prev = NULL;
3493 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003494 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003495 goto another_round;
3496 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003497 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003498 }
3499
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003500 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003501 if (rx_handler) {
3502 if (pt_prev) {
3503 ret = deliver_skb(skb, pt_prev, orig_dev);
3504 pt_prev = NULL;
3505 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003506 switch (rx_handler(&skb)) {
3507 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003508 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003509 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003510 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003511 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003512 case RX_HANDLER_EXACT:
3513 deliver_exact = true;
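			/* fall through */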
3514 case RX_HANDLER_PASS:
3515 break;
3516 default:
3517 BUG();
3518 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003519 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520
Eric Dumazet37b25f32013-07-18 07:19:26 -07003521 if (unlikely(vlan_tx_tag_present(skb))) {
3522 if (vlan_tx_tag_get_id(skb))
3523 skb->pkt_type = PACKET_OTHERHOST;
3524 /* Note: we might in the future use prio bits
3525		 * and set skb->priority like in vlan_do_receive().
3526		 * For the time being, just ignore the Priority Code Point.
3527 */
3528 skb->vlan_tci = 0;
3529 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003530
David S. Miller63d8ea72011-02-28 10:48:59 -08003531 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003532 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003533
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003535 list_for_each_entry_rcu(ptype,
3536 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003537 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003538 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3539 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003540 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003541 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542 pt_prev = ptype;
3543 }
3544 }
3545
3546 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003547 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003548 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003549 else
3550 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003552drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003553 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554 kfree_skb(skb);
3555		/* Jamal, now you will not be able to escape explaining
3556		 * to me how you were going to use this. :-)
3557 */
3558 ret = NET_RX_DROP;
3559 }
3560
Mel Gormanb4b9e352012-07-31 16:44:26 -07003561unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003563out:
David S. Miller9754e292013-02-14 15:57:38 -05003564 return ret;
3565}
3566
3567static int __netif_receive_skb(struct sk_buff *skb)
3568{
3569 int ret;
3570
3571 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3572 unsigned long pflags = current->flags;
3573
3574 /*
3575 * PFMEMALLOC skbs are special, they should
3576 * - be delivered to SOCK_MEMALLOC sockets only
3577 * - stay away from userspace
3578 * - have bounded memory usage
3579 *
3580 * Use PF_MEMALLOC as this saves us from propagating the allocation
3581 * context down to all allocation sites.
3582 */
3583 current->flags |= PF_MEMALLOC;
3584 ret = __netif_receive_skb_core(skb, true);
3585 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3586 } else
3587 ret = __netif_receive_skb_core(skb, false);
3588
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589 return ret;
3590}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003591
3592/**
3593 * netif_receive_skb - process receive buffer from network
3594 * @skb: buffer to process
3595 *
3596 * netif_receive_skb() is the main receive data processing function.
3597 * It always succeeds. The buffer may be dropped during processing
3598 * for congestion control or by the protocol layers.
3599 *
3600 * This function may only be called from softirq context and interrupts
3601 * should be enabled.
3602 *
3603 * Return values (usually ignored):
3604 * NET_RX_SUCCESS: no congestion
3605 * NET_RX_DROP: packet was dropped
3606 */
3607int netif_receive_skb(struct sk_buff *skb)
3608{
Eric Dumazet588f0332011-11-15 04:12:55 +00003609 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003610
Richard Cochranc1f19b52010-07-17 08:49:36 +00003611 if (skb_defer_rx_timestamp(skb))
3612 return NET_RX_SUCCESS;
3613
Eric Dumazetdf334542010-03-24 19:13:54 +00003614#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003615 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003616 struct rps_dev_flow voidflow, *rflow = &voidflow;
3617 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003618
Eric Dumazet3b098e22010-05-15 23:57:10 -07003619 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003620
Eric Dumazet3b098e22010-05-15 23:57:10 -07003621 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003622
Eric Dumazet3b098e22010-05-15 23:57:10 -07003623 if (cpu >= 0) {
3624 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3625 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003626 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003627 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003628 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003629 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003630#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003631 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003632}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003633EXPORT_SYMBOL(netif_receive_skb);
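/* Illustrative sketch (hypothetical NAPI driver, not code from this
 * file): netif_receive_skb() is normally called from a NAPI poll
 * routine, in softirq context:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = my_next_rx(napi)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			netif_receive_skb(skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete(napi);
 *		return done;
 *	}
 *
 * my_poll() and my_next_rx() are hypothetical names for illustration.
 */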
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634
Eric Dumazet88751272010-04-19 05:07:33 +00003635/* The network device is going away; flush any packets still pending.
3636 * Called with irqs disabled.
3637 */
Changli Gao152102c2010-03-30 20:16:22 +00003638static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003639{
Changli Gao152102c2010-03-30 20:16:22 +00003640 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003641 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003642 struct sk_buff *skb, *tmp;
3643
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003644 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003645 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003646 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003647 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003648 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003649 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003650 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003651 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003652 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003653
3654 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3655 if (skb->dev == dev) {
3656 __skb_unlink(skb, &sd->process_queue);
3657 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003658 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003659 }
3660 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003661}
3662
Herbert Xud565b0a2008-12-15 23:38:52 -08003663static int napi_gro_complete(struct sk_buff *skb)
3664{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003665 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003666 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003667 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003668 int err = -ENOENT;
3669
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003670 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3671
Herbert Xufc59f9a2009-04-14 15:11:06 -07003672 if (NAPI_GRO_CB(skb)->count == 1) {
3673 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003674 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003675 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003676
3677 rcu_read_lock();
3678 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003679 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003680 continue;
3681
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003682 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003683 break;
3684 }
3685 rcu_read_unlock();
3686
3687 if (err) {
3688 WARN_ON(&ptype->list == head);
3689 kfree_skb(skb);
3690 return NET_RX_SUCCESS;
3691 }
3692
3693out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003694 return netif_receive_skb(skb);
3695}
3696
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003697/* napi->gro_list contains packets ordered by age, with the
3698 * youngest packets at the head.
3699 * Complete skbs in reverse order to reduce latencies.
3700 */
3701void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003702{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003703 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003704
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003705 /* scan list and build reverse chain */
3706 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3707 skb->prev = prev;
3708 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003709 }
3710
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003711 for (skb = prev; skb; skb = prev) {
3712 skb->next = NULL;
3713
3714 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3715 return;
3716
3717 prev = skb->prev;
3718 napi_gro_complete(skb);
3719 napi->gro_count--;
3720 }
3721
Herbert Xud565b0a2008-12-15 23:38:52 -08003722 napi->gro_list = NULL;
3723}
Eric Dumazet86cac582010-08-31 18:25:32 +00003724EXPORT_SYMBOL(napi_gro_flush);
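/* Illustrative sketch (not code from this file): napi_complete() is the
 * usual caller, flushing every held GRO skb, old and young, before the
 * NAPI instance goes back to sleep, roughly:
 *
 *	napi_gro_flush(napi, false);
 *	local_irq_save(flags);
 *	__napi_complete(napi);
 *	local_irq_restore(flags);
 */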
Herbert Xud565b0a2008-12-15 23:38:52 -08003725
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003726static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3727{
3728 struct sk_buff *p;
3729 unsigned int maclen = skb->dev->hard_header_len;
3730
3731 for (p = napi->gro_list; p; p = p->next) {
3732 unsigned long diffs;
3733
3734 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3735 diffs |= p->vlan_tci ^ skb->vlan_tci;
3736 if (maclen == ETH_HLEN)
3737 diffs |= compare_ether_header(skb_mac_header(p),
3738 skb_gro_mac_header(skb));
3739 else if (!diffs)
3740 diffs = memcmp(skb_mac_header(p),
3741 skb_gro_mac_header(skb),
3742 maclen);
3743 NAPI_GRO_CB(p)->same_flow = !diffs;
3744 NAPI_GRO_CB(p)->flush = 0;
3745 }
3746}
3747
Rami Rosenbb728822012-11-28 21:55:25 +00003748static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003749{
3750 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003751 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003752 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003753 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003754 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003755 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003756
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003757 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003758 goto normal;
3759
David S. Miller21dc3302010-08-23 00:13:46 -07003760 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003761 goto normal;
3762
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003763 gro_list_prepare(napi, skb);
3764
Herbert Xud565b0a2008-12-15 23:38:52 -08003765 rcu_read_lock();
3766 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003767 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003768 continue;
3769
Herbert Xu86911732009-01-29 14:19:50 +00003770 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00003771 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003772 NAPI_GRO_CB(skb)->same_flow = 0;
3773 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003774 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003775
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003776 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003777 break;
3778 }
3779 rcu_read_unlock();
3780
3781 if (&ptype->list == head)
3782 goto normal;
3783
Herbert Xu0da2afd52008-12-26 14:57:42 -08003784 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003785 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003786
Herbert Xud565b0a2008-12-15 23:38:52 -08003787 if (pp) {
3788 struct sk_buff *nskb = *pp;
3789
3790 *pp = nskb->next;
3791 nskb->next = NULL;
3792 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003793 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003794 }
3795
Herbert Xu0da2afd52008-12-26 14:57:42 -08003796 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003797 goto ok;
3798
Herbert Xu4ae55442009-02-08 18:00:36 +00003799 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003800 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003801
Herbert Xu4ae55442009-02-08 18:00:36 +00003802 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003803 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003804 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003805 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003806 skb->next = napi->gro_list;
3807 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003808 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003809
Herbert Xuad0f9902009-02-01 01:24:55 -08003810pull:
Herbert Xucb189782009-05-26 18:50:31 +00003811 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3812 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3813
3814 BUG_ON(skb->end - skb->tail < grow);
3815
3816 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3817
3818 skb->tail += grow;
3819 skb->data_len -= grow;
3820
3821 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003822 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003823
Eric Dumazet9e903e02011-10-18 21:00:24 +00003824 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003825 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003826 memmove(skb_shinfo(skb)->frags,
3827 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003828 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003829 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003830 }
3831
Herbert Xud565b0a2008-12-15 23:38:52 -08003832ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003833 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003834
3835normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003836 ret = GRO_NORMAL;
3837 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003838}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003839
Herbert Xu96e93ea2009-01-06 10:49:34 -08003840
Rami Rosenbb728822012-11-28 21:55:25 +00003841static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003842{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003843 switch (ret) {
3844 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003845 if (netif_receive_skb(skb))
3846 ret = GRO_DROP;
3847 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003848
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003849 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003850 kfree_skb(skb);
3851 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003852
Eric Dumazetdaa86542012-04-19 07:07:40 +00003853 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003854 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3855 kmem_cache_free(skbuff_head_cache, skb);
3856 else
3857 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003858 break;
3859
Ben Hutchings5b252f02009-10-29 07:17:09 +00003860 case GRO_HELD:
3861 case GRO_MERGED:
3862 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003863 }
3864
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003865 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003866}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003867
Eric Dumazetca07e432012-10-06 22:28:06 +00003868static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003869{
Eric Dumazetca07e432012-10-06 22:28:06 +00003870 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3871 const skb_frag_t *frag0 = &pinfo->frags[0];
3872
Herbert Xu78a478d2009-05-26 18:50:21 +00003873 NAPI_GRO_CB(skb)->data_offset = 0;
3874 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003875 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003876
Herbert Xu78d3fd02009-05-26 18:50:23 +00003877 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003878 pinfo->nr_frags &&
3879 !PageHighMem(skb_frag_page(frag0))) {
3880 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3881 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003882 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003883}
Herbert Xu78a478d2009-05-26 18:50:21 +00003884
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003885gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003886{
Herbert Xu86911732009-01-29 14:19:50 +00003887 skb_gro_reset_offset(skb);
3888
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003889 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003890}
3891EXPORT_SYMBOL(napi_gro_receive);
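/*
 * Illustrative sketch (editor's addition, not part of this file): a typical
 * driver NAPI poll routine hands completed receive buffers to
 * napi_gro_receive() instead of netif_receive_skb() so that same-flow
 * segments can be merged. The example_* names and descriptor helpers are
 * hypothetical; only the napi_gro_receive()/napi_complete() calls are real.
 */
#if 0 /* illustrative only */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = example_next_rx_skb(priv); /* hypothetical */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb); /* returns GRO_NORMAL/HELD/MERGED/... */
		done++;
	}
	if (done < budget)
		napi_complete(napi);
	return done;
}
#endif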
3892
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003893static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003894{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003895 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003896 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3897 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003898 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003899 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003900 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003901
3902 napi->skb = skb;
3903}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003904
Herbert Xu76620aa2009-04-16 02:02:07 -07003905struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003906{
Herbert Xu5d38a072009-01-04 16:13:40 -08003907 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003908
3909 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003910 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3911 if (skb)
3912 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003913 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003914 return skb;
3915}
Herbert Xu76620aa2009-04-16 02:02:07 -07003916EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003917
Rami Rosenbb728822012-11-28 21:55:25 +00003918static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003919 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003920{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003921 switch (ret) {
3922 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003923 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003924 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003925
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003926 if (ret == GRO_HELD)
3927 skb_gro_pull(skb, -ETH_HLEN);
3928 else if (netif_receive_skb(skb))
3929 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003930 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003931
3932 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003933 case GRO_MERGED_FREE:
3934 napi_reuse_skb(napi, skb);
3935 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003936
3937 case GRO_MERGED:
3938 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003939 }
3940
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003941 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003942}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003943
Eric Dumazet4adb9c42012-05-18 20:49:06 +00003944static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003945{
Herbert Xu76620aa2009-04-16 02:02:07 -07003946 struct sk_buff *skb = napi->skb;
3947 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00003948 unsigned int hlen;
3949 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07003950
3951 napi->skb = NULL;
3952
3953 skb_reset_mac_header(skb);
3954 skb_gro_reset_offset(skb);
3955
Herbert Xua5b1cf22009-05-26 18:50:28 +00003956 off = skb_gro_offset(skb);
3957 hlen = off + sizeof(*eth);
3958 eth = skb_gro_header_fast(skb, off);
3959 if (skb_gro_header_hard(skb, hlen)) {
3960 eth = skb_gro_header_slow(skb, hlen, off);
3961 if (unlikely(!eth)) {
3962 napi_reuse_skb(napi, skb);
3963 skb = NULL;
3964 goto out;
3965 }
Herbert Xu76620aa2009-04-16 02:02:07 -07003966 }
3967
3968 skb_gro_pull(skb, sizeof(*eth));
3969
3970 /*
3971 * This works because the only protocols we care about don't require
3972 * special handling. We'll fix it up properly at the end.
3973 */
3974 skb->protocol = eth->h_proto;
3975
3976out:
3977 return skb;
3978}
Herbert Xu76620aa2009-04-16 02:02:07 -07003979
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003980gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07003981{
3982 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003983
3984 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003985 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003986
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003987 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08003988}
3989EXPORT_SYMBOL(napi_gro_frags);
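/*
 * Illustrative sketch (editor's addition, not part of this file): page-based
 * drivers obtain a reusable skb with napi_get_frags(), attach a receive page
 * as a fragment, and call napi_gro_frags(), which parses the Ethernet header
 * itself (see napi_frags_skb() above). example_rx_frag() and its caller's
 * page/length handling are hypothetical.
 */
#if 0 /* illustrative only */
static void example_rx_frag(struct napi_struct *napi, struct page *page,
			    unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return; /* allocation failure: caller drops or recycles page */

	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi); /* consumes or recycles napi->skb */
}
#endif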
3990
Eric Dumazete326bed2010-04-22 00:22:45 -07003991/*
3992 * net_rps_action sends any pending IPIs for RPS.
3993 * Note: called with local irq disabled, but exits with local irq enabled.
3994 */
3995static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3996{
3997#ifdef CONFIG_RPS
3998 struct softnet_data *remsd = sd->rps_ipi_list;
3999
4000 if (remsd) {
4001 sd->rps_ipi_list = NULL;
4002
4003 local_irq_enable();
4004
4005 /* Send pending IPIs to kick RPS processing on remote cpus. */
4006 while (remsd) {
4007 struct softnet_data *next = remsd->rps_ipi_next;
4008
4009 if (cpu_online(remsd->cpu))
4010 __smp_call_function_single(remsd->cpu,
4011 &remsd->csd, 0);
4012 remsd = next;
4013 }
4014 } else
4015#endif
4016 local_irq_enable();
4017}
4018
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004019static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020{
4021 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004022 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023
Eric Dumazete326bed2010-04-22 00:22:45 -07004024#ifdef CONFIG_RPS
4025 /* Check if we have pending IPIs; it's better to send them now
4026 * than to wait for net_rx_action() to end.
4027 */
4028 if (sd->rps_ipi_list) {
4029 local_irq_disable();
4030 net_rps_action_and_irq_enable(sd);
4031 }
4032#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004033 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004034 local_irq_disable();
4035 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004037 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038
Changli Gao6e7676c2010-04-27 15:07:33 -07004039 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004040 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004041 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004042 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004043 input_queue_head_incr(sd);
4044 if (++work >= quota) {
4045 local_irq_enable();
4046 return work;
4047 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004048 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049
Changli Gao6e7676c2010-04-27 15:07:33 -07004050 rps_lock(sd);
4051 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004052 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004053 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4054 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004055
Changli Gao6e7676c2010-04-27 15:07:33 -07004056 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004057 /*
4058 * Inline a custom version of __napi_complete().
4059 * Only the current cpu owns and manipulates this napi,
4060 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
4061 * so we can use a plain write instead of clear_bit()
4062 * and we don't need an smp_mb() memory barrier.
4063 */
4064 list_del(&napi->poll_list);
4065 napi->state = 0;
4066
Changli Gao6e7676c2010-04-27 15:07:33 -07004067 quota = work + qlen;
4068 }
4069 rps_unlock(sd);
4070 }
4071 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004073 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074}
4075
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004076/**
4077 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004078 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004079 *
4080 * The entry's receive function will be scheduled to run
4081 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004082void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004083{
4084 unsigned long flags;
4085
4086 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004087 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004088 local_irq_restore(flags);
4089}
4090EXPORT_SYMBOL(__napi_schedule);
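/*
 * Illustrative sketch (editor's addition, not part of this file): a hard
 * interrupt handler masks further RX interrupts and defers the work to NAPI.
 * Drivers normally call napi_schedule(), which tests NAPI_STATE_SCHED and
 * then lands in __napi_schedule() above. The example_* names are
 * hypothetical.
 */
#if 0 /* illustrative only */
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	example_mask_rx_irq(priv);  /* hypothetical register write */
	napi_schedule(&priv->napi); /* raises NET_RX_SOFTIRQ */
	return IRQ_HANDLED;
}
#endif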
4091
Herbert Xud565b0a2008-12-15 23:38:52 -08004092void __napi_complete(struct napi_struct *n)
4093{
4094 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4095 BUG_ON(n->gro_list);
4096
4097 list_del(&n->poll_list);
4098 smp_mb__before_clear_bit();
4099 clear_bit(NAPI_STATE_SCHED, &n->state);
4100}
4101EXPORT_SYMBOL(__napi_complete);
4102
4103void napi_complete(struct napi_struct *n)
4104{
4105 unsigned long flags;
4106
4107 /*
4108 * don't let napi dequeue from the cpu poll list
4109 * just in case it's running on a different cpu
4110 */
4111 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4112 return;
4113
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004114 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004115 local_irq_save(flags);
4116 __napi_complete(n);
4117 local_irq_restore(flags);
4118}
4119EXPORT_SYMBOL(napi_complete);
4120
4121void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4122 int (*poll)(struct napi_struct *, int), int weight)
4123{
4124 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004125 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004126 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004127 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004128 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004129 if (weight > NAPI_POLL_WEIGHT)
4130 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4131 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004132 napi->weight = weight;
4133 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004134 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004135#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004136 spin_lock_init(&napi->poll_lock);
4137 napi->poll_owner = -1;
4138#endif
4139 set_bit(NAPI_STATE_SCHED, &napi->state);
4140}
4141EXPORT_SYMBOL(netif_napi_add);
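/*
 * Illustrative sketch (editor's addition, not part of this file): registering
 * the NAPI context at probe time and tearing it down on remove. Most drivers
 * pass NAPI_POLL_WEIGHT (64); larger values trigger the pr_err_once() above.
 * example_poll() is the hypothetical poll routine sketched earlier.
 */
#if 0 /* illustrative only */
static int example_setup_napi(struct example_priv *priv)
{
	netif_napi_add(priv->netdev, &priv->napi, example_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
	return 0;
}

static void example_teardown_napi(struct example_priv *priv)
{
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
}
#endif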
4142
4143void netif_napi_del(struct napi_struct *napi)
4144{
4145 struct sk_buff *skb, *next;
4146
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004147 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004148 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004149
4150 for (skb = napi->gro_list; skb; skb = next) {
4151 next = skb->next;
4152 skb->next = NULL;
4153 kfree_skb(skb);
4154 }
4155
4156 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004157 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004158}
4159EXPORT_SYMBOL(netif_napi_del);
4160
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161static void net_rx_action(struct softirq_action *h)
4162{
Eric Dumazete326bed2010-04-22 00:22:45 -07004163 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004164 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004165 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004166 void *have;
4167
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 local_irq_disable();
4169
Eric Dumazete326bed2010-04-22 00:22:45 -07004170 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004171 struct napi_struct *n;
4172 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004174 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004175 * Allow this to run for 2 jiffies, which allows
4176 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004177 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004178 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 goto softnet_break;
4180
4181 local_irq_enable();
4182
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004183 /* Even though interrupts have been re-enabled, this
4184 * access is safe because interrupts can only add new
4185 * entries to the tail of this list, and only ->poll()
4186 * calls can remove this head entry from the list.
4187 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004188 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004190 have = netpoll_poll_lock(n);
4191
4192 weight = n->weight;
4193
David S. Miller0a7606c2007-10-29 21:28:47 -07004194 /* This NAPI_STATE_SCHED test is for avoiding a race
4195 * with netpoll's poll_napi(). Only the entity which
4196 * obtains the lock and sees NAPI_STATE_SCHED set will
4197 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004198 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004199 */
4200 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004201 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004202 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004203 trace_napi_poll(n);
4204 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004205
4206 WARN_ON_ONCE(work > weight);
4207
4208 budget -= work;
4209
4210 local_irq_disable();
4211
4212 /* Drivers must not modify the NAPI state if they
4213 * consume the entire weight. In such cases this code
4214 * still "owns" the NAPI instance and therefore can
4215 * move the instance around on the list at-will.
4216 */
David S. Millerfed17f32008-01-07 21:00:40 -08004217 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004218 if (unlikely(napi_disable_pending(n))) {
4219 local_irq_enable();
4220 napi_complete(n);
4221 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004222 } else {
4223 if (n->gro_list) {
4224 /* flush too old packets
4225 * If HZ < 1000, flush all packets.
4226 */
4227 local_irq_enable();
4228 napi_gro_flush(n, HZ >= 1000);
4229 local_irq_disable();
4230 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004231 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004232 }
David S. Millerfed17f32008-01-07 21:00:40 -08004233 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004234
4235 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 }
4237out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004238 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004239
Chris Leechdb217332006-06-17 21:24:58 -07004240#ifdef CONFIG_NET_DMA
4241 /*
4242 * There may not be any more sk_buffs coming right now, so push
4243 * any pending DMA copies to hardware
4244 */
Dan Williams2ba05622009-01-06 11:38:14 -07004245 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004246#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004247
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248 return;
4249
4250softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004251 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4253 goto out;
4254}
4255
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004256struct netdev_upper {
4257 struct net_device *dev;
4258 bool master;
4259 struct list_head list;
4260 struct rcu_head rcu;
4261 struct list_head search_list;
4262};
4263
4264static void __append_search_uppers(struct list_head *search_list,
4265 struct net_device *dev)
4266{
4267 struct netdev_upper *upper;
4268
4269 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4270 /* check that this upper is not already in the search list */
4271 if (list_empty(&upper->search_list))
4272 list_add_tail(&upper->search_list, search_list);
4273 }
4274}
4275
4276static bool __netdev_search_upper_dev(struct net_device *dev,
4277 struct net_device *upper_dev)
4278{
4279 LIST_HEAD(search_list);
4280 struct netdev_upper *upper;
4281 struct netdev_upper *tmp;
4282 bool ret = false;
4283
4284 __append_search_uppers(&search_list, dev);
4285 list_for_each_entry(upper, &search_list, search_list) {
4286 if (upper->dev == upper_dev) {
4287 ret = true;
4288 break;
4289 }
4290 __append_search_uppers(&search_list, upper->dev);
4291 }
4292 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4293 INIT_LIST_HEAD(&upper->search_list);
4294 return ret;
4295}
4296
4297static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4298 struct net_device *upper_dev)
4299{
4300 struct netdev_upper *upper;
4301
4302 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4303 if (upper->dev == upper_dev)
4304 return upper;
4305 }
4306 return NULL;
4307}
4308
4309/**
4310 * netdev_has_upper_dev - Check if device is linked to an upper device
4311 * @dev: device
4312 * @upper_dev: upper device to check
4313 *
4314 * Find out if a device is linked to specified upper device and return true
4315 * in case it is. Note that this checks only immediate upper device,
4316 * not through a complete stack of devices. The caller must hold the RTNL lock.
4317 */
4318bool netdev_has_upper_dev(struct net_device *dev,
4319 struct net_device *upper_dev)
4320{
4321 ASSERT_RTNL();
4322
4323 return __netdev_find_upper(dev, upper_dev);
4324}
4325EXPORT_SYMBOL(netdev_has_upper_dev);
4326
4327/**
4328 * netdev_has_any_upper_dev - Check if device is linked to some device
4329 * @dev: device
4330 *
4331 * Find out if a device is linked to an upper device and return true in case
4332 * it is. The caller must hold the RTNL lock.
4333 */
4334bool netdev_has_any_upper_dev(struct net_device *dev)
4335{
4336 ASSERT_RTNL();
4337
4338 return !list_empty(&dev->upper_dev_list);
4339}
4340EXPORT_SYMBOL(netdev_has_any_upper_dev);
4341
4342/**
4343 * netdev_master_upper_dev_get - Get master upper device
4344 * @dev: device
4345 *
4346 * Find a master upper device and return pointer to it or NULL in case
4347 * it's not there. The caller must hold the RTNL lock.
4348 */
4349struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4350{
4351 struct netdev_upper *upper;
4352
4353 ASSERT_RTNL();
4354
4355 if (list_empty(&dev->upper_dev_list))
4356 return NULL;
4357
4358 upper = list_first_entry(&dev->upper_dev_list,
4359 struct netdev_upper, list);
4360 if (likely(upper->master))
4361 return upper->dev;
4362 return NULL;
4363}
4364EXPORT_SYMBOL(netdev_master_upper_dev_get);
4365
4366/**
4367 * netdev_master_upper_dev_get_rcu - Get master upper device
4368 * @dev: device
4369 *
4370 * Find a master upper device and return pointer to it or NULL in case
4371 * it's not there. The caller must hold the RCU read lock.
4372 */
4373struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4374{
4375 struct netdev_upper *upper;
4376
4377 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4378 struct netdev_upper, list);
4379 if (upper && likely(upper->master))
4380 return upper->dev;
4381 return NULL;
4382}
4383EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4384
4385static int __netdev_upper_dev_link(struct net_device *dev,
4386 struct net_device *upper_dev, bool master)
4387{
4388 struct netdev_upper *upper;
4389
4390 ASSERT_RTNL();
4391
4392 if (dev == upper_dev)
4393 return -EBUSY;
4394
4395 /* To prevent loops, check that dev is not an upper device of upper_dev. */
4396 if (__netdev_search_upper_dev(upper_dev, dev))
4397 return -EBUSY;
4398
4399 if (__netdev_find_upper(dev, upper_dev))
4400 return -EEXIST;
4401
4402 if (master && netdev_master_upper_dev_get(dev))
4403 return -EBUSY;
4404
4405 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4406 if (!upper)
4407 return -ENOMEM;
4408
4409 upper->dev = upper_dev;
4410 upper->master = master;
4411 INIT_LIST_HEAD(&upper->search_list);
4412
4413 /* Ensure that master upper link is always the first item in list. */
4414 if (master)
4415 list_add_rcu(&upper->list, &dev->upper_dev_list);
4416 else
4417 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4418 dev_hold(upper_dev);
4419
4420 return 0;
4421}
4422
4423/**
4424 * netdev_upper_dev_link - Add a link to the upper device
4425 * @dev: device
4426 * @upper_dev: new upper device
4427 *
4428 * Adds a link to a device which is upper to this one. The caller must hold
4429 * the RTNL lock. On a failure a negative errno code is returned.
4430 * On success the reference counts are adjusted and the function
4431 * returns zero.
4432 */
4433int netdev_upper_dev_link(struct net_device *dev,
4434 struct net_device *upper_dev)
4435{
4436 return __netdev_upper_dev_link(dev, upper_dev, false);
4437}
4438EXPORT_SYMBOL(netdev_upper_dev_link);
4439
4440/**
4441 * netdev_master_upper_dev_link - Add a master link to the upper device
4442 * @dev: device
4443 * @upper_dev: new upper device
4444 *
4445 * Adds a link to a device which is upper to this one. In this case, only
4446 * one master upper device can be linked, although other non-master devices
4447 * might be linked as well. The caller must hold the RTNL lock.
4448 * On a failure a negative errno code is returned. On success the reference
4449 * counts are adjusted and the function returns zero.
4450 */
4451int netdev_master_upper_dev_link(struct net_device *dev,
4452 struct net_device *upper_dev)
4453{
4454 return __netdev_upper_dev_link(dev, upper_dev, true);
4455}
4456EXPORT_SYMBOL(netdev_master_upper_dev_link);
4457
4458/**
4459 * netdev_upper_dev_unlink - Removes a link to upper device
4460 * @dev: device
4461 * @upper_dev: upper device to unlink
4462 *
4463 * Removes a link to a device which is upper to this one. The caller must hold
4464 * the RTNL lock.
4465 */
4466void netdev_upper_dev_unlink(struct net_device *dev,
4467 struct net_device *upper_dev)
4468{
4469 struct netdev_upper *upper;
4470
4471 ASSERT_RTNL();
4472
4473 upper = __netdev_find_upper(dev, upper_dev);
4474 if (!upper)
4475 return;
4476 list_del_rcu(&upper->list);
4477 dev_put(upper_dev);
4478 kfree_rcu(upper, rcu);
4479}
4480EXPORT_SYMBOL(netdev_upper_dev_unlink);
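/*
 * Illustrative sketch (editor's addition, not part of this file): how a
 * bonding/team-style driver would use the upper-device API when enslaving
 * and releasing a port. Note the argument order: the lower (slave) device
 * comes first, the upper (master) second. Both calls require the RTNL lock;
 * the example_* wrappers are hypothetical.
 */
#if 0 /* illustrative only */
static int example_enslave(struct net_device *master_dev,
			   struct net_device *slave_dev)
{
	ASSERT_RTNL();
	/* -EBUSY on loops or second master, -EEXIST if already linked */
	return netdev_master_upper_dev_link(slave_dev, master_dev);
}

static void example_release(struct net_device *master_dev,
			    struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave_dev, master_dev);
}
#endif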
4481
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004482static void dev_change_rx_flags(struct net_device *dev, int flags)
4483{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004484 const struct net_device_ops *ops = dev->netdev_ops;
4485
Vlad Yasevich05cf2142013-11-19 20:47:15 -05004486 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004487 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004488}
4489
Wang Chendad9b332008-06-18 01:48:28 -07004490static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07004491{
Eric Dumazetb536db92011-11-30 21:42:26 +00004492 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004493 kuid_t uid;
4494 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07004495
Patrick McHardy24023452007-07-14 18:51:31 -07004496 ASSERT_RTNL();
4497
Wang Chendad9b332008-06-18 01:48:28 -07004498 dev->flags |= IFF_PROMISC;
4499 dev->promiscuity += inc;
4500 if (dev->promiscuity == 0) {
4501 /*
4502 * Avoid overflow.
4503 * If inc causes overflow, leave promisc untouched and return an error.
4504 */
4505 if (inc < 0)
4506 dev->flags &= ~IFF_PROMISC;
4507 else {
4508 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004509 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4510 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004511 return -EOVERFLOW;
4512 }
4513 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004514 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004515 pr_info("device %s %s promiscuous mode\n",
4516 dev->name,
4517 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11004518 if (audit_enabled) {
4519 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004520 audit_log(current->audit_context, GFP_ATOMIC,
4521 AUDIT_ANOM_PROMISCUOUS,
4522 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4523 dev->name, (dev->flags & IFF_PROMISC),
4524 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07004525 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004526 from_kuid(&init_user_ns, uid),
4527 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004528 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11004529 }
Patrick McHardy24023452007-07-14 18:51:31 -07004530
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004531 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07004532 }
Wang Chendad9b332008-06-18 01:48:28 -07004533 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004534}
4535
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536/**
4537 * dev_set_promiscuity - update promiscuity count on a device
4538 * @dev: device
4539 * @inc: modifier
4540 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07004541 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542 * remains above zero the interface remains promiscuous. Once it hits zero
4543 * the device reverts back to normal filtering operation. A negative inc
4544 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07004545 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 */
Wang Chendad9b332008-06-18 01:48:28 -07004547int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548{
Eric Dumazetb536db92011-11-30 21:42:26 +00004549 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07004550 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551
Wang Chendad9b332008-06-18 01:48:28 -07004552 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07004553 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07004554 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07004555 if (dev->flags != old_flags)
4556 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07004557 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004559EXPORT_SYMBOL(dev_set_promiscuity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560
4561/**
4562 * dev_set_allmulti - update allmulti count on a device
4563 * @dev: device
4564 * @inc: modifier
4565 *
4566 * Add or remove reception of all multicast frames to a device. While the
4567 * count in the device remains above zero the interface remains listening
4568 * to all multicast frames. Once it hits zero the device reverts back to normal
4569 * filtering operation. A negative @inc value is used to drop the counter
4570 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07004571 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572 */
4573
Wang Chendad9b332008-06-18 01:48:28 -07004574int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575{
Eric Dumazetb536db92011-11-30 21:42:26 +00004576 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577
Patrick McHardy24023452007-07-14 18:51:31 -07004578 ASSERT_RTNL();
4579
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07004581 dev->allmulti += inc;
4582 if (dev->allmulti == 0) {
4583 /*
4584 * Avoid overflow.
4585 * If inc causes overflow, leave allmulti untouched and return an error.
4586 */
4587 if (inc < 0)
4588 dev->flags &= ~IFF_ALLMULTI;
4589 else {
4590 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004591 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4592 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004593 return -EOVERFLOW;
4594 }
4595 }
Patrick McHardy24023452007-07-14 18:51:31 -07004596 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004597 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07004598 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07004599 }
Wang Chendad9b332008-06-18 01:48:28 -07004600 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004601}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004602EXPORT_SYMBOL(dev_set_allmulti);
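/*
 * Illustrative sketch (editor's addition, not part of this file): a
 * packet-capture style user takes a promiscuity and allmulti reference while
 * active and drops both on teardown; the reference counters make nested
 * users safe. RTNL must be held around both calls. The example_* names are
 * hypothetical.
 */
#if 0 /* illustrative only */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);
	if (!err)
		err = dev_set_allmulti(dev, 1);
	rtnl_unlock();
	return err; /* -EOVERFLOW if a counter would wrap */
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_allmulti(dev, -1);
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}
#endif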
Patrick McHardy4417da62007-06-27 01:28:10 -07004603
4604/*
4605 * Upload unicast and multicast address lists to device and
4606 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08004607 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07004608 * are present.
4609 */
4610void __dev_set_rx_mode(struct net_device *dev)
4611{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004612 const struct net_device_ops *ops = dev->netdev_ops;
4613
Patrick McHardy4417da62007-06-27 01:28:10 -07004614 /* dev_open will call this function so the list will stay sane. */
4615 if (!(dev->flags&IFF_UP))
4616 return;
4617
4618 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09004619 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07004620
Jiri Pirko01789342011-08-16 06:29:00 +00004621 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004622 /* Unicast address changes may only happen under the rtnl,
4623 * therefore calling __dev_set_promiscuity here is safe.
4624 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004625 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004626 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07004627 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004628 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004629 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07004630 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07004631 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004632 }
Jiri Pirko01789342011-08-16 06:29:00 +00004633
4634 if (ops->ndo_set_rx_mode)
4635 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004636}
4637
4638void dev_set_rx_mode(struct net_device *dev)
4639{
David S. Millerb9e40852008-07-15 00:15:08 -07004640 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004641 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07004642 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643}
4644
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004645/**
4646 * dev_get_flags - get flags reported to userspace
4647 * @dev: device
4648 *
4649 * Get the combination of flag bits exported through APIs to userspace.
4650 */
Eric Dumazet95c96172012-04-15 05:58:06 +00004651unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652{
Eric Dumazet95c96172012-04-15 05:58:06 +00004653 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654
4655 flags = (dev->flags & ~(IFF_PROMISC |
4656 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004657 IFF_RUNNING |
4658 IFF_LOWER_UP |
4659 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660 (dev->gflags & (IFF_PROMISC |
4661 IFF_ALLMULTI));
4662
Stefan Rompfb00055a2006-03-20 17:09:11 -08004663 if (netif_running(dev)) {
4664 if (netif_oper_up(dev))
4665 flags |= IFF_RUNNING;
4666 if (netif_carrier_ok(dev))
4667 flags |= IFF_LOWER_UP;
4668 if (netif_dormant(dev))
4669 flags |= IFF_DORMANT;
4670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671
4672 return flags;
4673}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004674EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675
Patrick McHardybd380812010-02-26 06:34:53 +00004676int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677{
Eric Dumazetb536db92011-11-30 21:42:26 +00004678 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004679 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680
Patrick McHardy24023452007-07-14 18:51:31 -07004681 ASSERT_RTNL();
4682
Linus Torvalds1da177e2005-04-16 15:20:36 -07004683 /*
4684 * Set the flags on our device.
4685 */
4686
4687 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4688 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4689 IFF_AUTOMEDIA)) |
4690 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4691 IFF_ALLMULTI));
4692
4693 /*
4694 * Load in the correct multicast list now the flags have changed.
4695 */
4696
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004697 if ((old_flags ^ flags) & IFF_MULTICAST)
4698 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004699
Patrick McHardy4417da62007-06-27 01:28:10 -07004700 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701
4702 /*
4703 * Have we downed the interface? We handle IFF_UP ourselves
4704 * according to user attempts to set it, rather than blindly
4705 * setting it.
4706 */
4707
4708 ret = 0;
4709 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004710 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711
4712 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004713 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004714 }
4715
Linus Torvalds1da177e2005-04-16 15:20:36 -07004716 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004717 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4718
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719 dev->gflags ^= IFF_PROMISC;
4720 dev_set_promiscuity(dev, inc);
4721 }
4722
4723 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4724 is important. Some (broken) drivers set IFF_PROMISC when
4725 IFF_ALLMULTI is requested, without asking us and without reporting it.
4726 */
4727 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004728 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4729
Linus Torvalds1da177e2005-04-16 15:20:36 -07004730 dev->gflags ^= IFF_ALLMULTI;
4731 dev_set_allmulti(dev, inc);
4732 }
4733
Patrick McHardybd380812010-02-26 06:34:53 +00004734 return ret;
4735}
4736
4737void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4738{
4739 unsigned int changes = dev->flags ^ old_flags;
4740
4741 if (changes & IFF_UP) {
4742 if (dev->flags & IFF_UP)
4743 call_netdevice_notifiers(NETDEV_UP, dev);
4744 else
4745 call_netdevice_notifiers(NETDEV_DOWN, dev);
4746 }
4747
4748 if (dev->flags & IFF_UP &&
4749 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4750 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4751}
4752
4753/**
4754 * dev_change_flags - change device settings
4755 * @dev: device
4756 * @flags: device state flags
4757 *
4758 * Change settings on device based state flags. The flags are
4759 * in the userspace exported format.
4760 */
Eric Dumazetb536db92011-11-30 21:42:26 +00004761int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00004762{
Eric Dumazetb536db92011-11-30 21:42:26 +00004763 int ret;
4764 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004765
4766 ret = __dev_change_flags(dev, flags);
4767 if (ret < 0)
4768 return ret;
4769
4770 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004771 if (changes)
4772 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773
Patrick McHardybd380812010-02-26 06:34:53 +00004774 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 return ret;
4776}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004777EXPORT_SYMBOL(dev_change_flags);
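/*
 * Illustrative sketch (editor's addition, not part of this file): bringing
 * an interface up from kernel code, much as ioctl(SIOCSIFFLAGS) does, by
 * OR-ing IFF_UP into the userspace-format flags from dev_get_flags().
 * RTNL must be held; the example_* name is hypothetical.
 */
#if 0 /* illustrative only */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif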
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004779/**
4780 * dev_set_mtu - Change maximum transfer unit
4781 * @dev: device
4782 * @new_mtu: new transfer unit
4783 *
4784 * Change the maximum transfer size of the network device.
4785 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786int dev_set_mtu(struct net_device *dev, int new_mtu)
4787{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004788 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789 int err;
4790
4791 if (new_mtu == dev->mtu)
4792 return 0;
4793
4794 /* MTU must be positive. */
4795 if (new_mtu < 0)
4796 return -EINVAL;
4797
4798 if (!netif_device_present(dev))
4799 return -ENODEV;
4800
4801 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004802 if (ops->ndo_change_mtu)
4803 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 else
4805 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004806
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00004807 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004808 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 return err;
4810}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004811EXPORT_SYMBOL(dev_set_mtu);
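/*
 * Illustrative sketch (editor's addition, not part of this file): switching
 * a device to a jumbo MTU. The driver may veto or adjust the change via
 * ndo_change_mtu(); on success the NETDEV_CHANGEMTU notifiers run as seen
 * above. The example_* name is hypothetical.
 */
#if 0 /* illustrative only */
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}
#endif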
Linus Torvalds1da177e2005-04-16 15:20:36 -07004812
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004813/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00004814 * dev_set_group - Change group this device belongs to
4815 * @dev: device
4816 * @new_group: group this device should belong to
4817 */
4818void dev_set_group(struct net_device *dev, int new_group)
4819{
4820 dev->group = new_group;
4821}
4822EXPORT_SYMBOL(dev_set_group);
4823
4824/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004825 * dev_set_mac_address - Change Media Access Control Address
4826 * @dev: device
4827 * @sa: new address
4828 *
4829 * Change the hardware (MAC) address of the device
4830 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4832{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004833 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834 int err;
4835
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004836 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004837 return -EOPNOTSUPP;
4838 if (sa->sa_family != dev->type)
4839 return -EINVAL;
4840 if (!netif_device_present(dev))
4841 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004842 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00004843 if (err)
4844 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00004845 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00004846 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04004847 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00004848 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004849}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004850EXPORT_SYMBOL(dev_set_mac_address);
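/*
 * Illustrative sketch (editor's addition, not part of this file): setting a
 * MAC address from kernel code. The sockaddr family must match dev->type
 * exactly as the checks above enforce. The locally administered address and
 * the example_* name are hypothetical.
 */
#if 0 /* illustrative only */
static int example_set_mac(struct net_device *dev)
{
	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type; /* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, addr, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}
#endif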
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004852/**
4853 * dev_change_carrier - Change device carrier
4854 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00004855 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004856 *
4857 * Change device carrier
4858 */
4859int dev_change_carrier(struct net_device *dev, bool new_carrier)
4860{
4861 const struct net_device_ops *ops = dev->netdev_ops;
4862
4863 if (!ops->ndo_change_carrier)
4864 return -EOPNOTSUPP;
4865 if (!netif_device_present(dev))
4866 return -ENODEV;
4867 return ops->ndo_change_carrier(dev, new_carrier);
4868}
4869EXPORT_SYMBOL(dev_change_carrier);
4870
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871/**
4872 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004873 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874 *
4875 * Returns a suitable unique value for a new device interface
4876 * number. The caller must hold the rtnl semaphore or the
4877 * dev_base_lock to be sure it remains unique.
4878 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004879static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004881 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882 for (;;) {
4883 if (++ifindex <= 0)
4884 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004885 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004886 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887 }
4888}
4889
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004891static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004893static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004894{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896}
4897
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004898static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004899{
Krishna Kumare93737b2009-12-08 22:26:02 +00004900 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004901
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004902 BUG_ON(dev_boot_phase);
4903 ASSERT_RTNL();
4904
Krishna Kumare93737b2009-12-08 22:26:02 +00004905 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004906 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00004907 * for initialization unwind. Remove those
4908 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004909 */
4910 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004911 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
4912 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004913
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004914 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004915 list_del(&dev->unreg_list);
4916 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004917 }
Eric Dumazet449f4542011-05-19 12:24:16 +00004918 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004919 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00004920 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004921
Octavian Purdila44345722010-12-13 12:44:07 +00004922 /* If device is running, close it first. */
4923 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004924
Octavian Purdila44345722010-12-13 12:44:07 +00004925 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004926 /* And unlink it from device chain. */
4927 unlist_netdevice(dev);
4928
4929 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004930 }
4931
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004932 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004933
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004934 list_for_each_entry(dev, head, unreg_list) {
4935 /* Shutdown queueing discipline. */
4936 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004937
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004938
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004939 /* Notify protocols that we are about to destroy
4940 this device. They should clean up all their state.
4941 */
4942 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4943
Patrick McHardya2835762010-02-26 06:34:51 +00004944 if (!dev->rtnl_link_ops ||
4945 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4946 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4947
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004948 /*
4949 * Flush the unicast and multicast chains
4950 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00004951 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004952 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004953
4954 if (dev->netdev_ops->ndo_uninit)
4955 dev->netdev_ops->ndo_uninit(dev);
4956
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004957 /* Notifier chain MUST detach us all upper devices. */
4958 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004959
4960 /* Remove entries from kobject tree */
4961 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00004962#ifdef CONFIG_XPS
4963 /* Remove XPS queueing entries */
4964 netif_reset_xps_queues_gt(dev, 0);
4965#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004966 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004967
Eric W. Biederman850a5452011-10-13 22:25:23 +00004968 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004969
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004970 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004971 dev_put(dev);
4972}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004973
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004974static void rollback_registered(struct net_device *dev)
4975{
4976 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004977
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004978 list_add(&dev->unreg_list, &single);
4979 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00004980 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004981}
4982
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004983static netdev_features_t netdev_fix_features(struct net_device *dev,
4984 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07004985{
Michał Mirosław57422dc2011-01-22 12:14:12 +00004986 /* Fix illegal checksum combinations */
4987 if ((features & NETIF_F_HW_CSUM) &&
4988 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004989 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00004990 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4991 }
4992
Herbert Xub63365a2008-10-23 01:11:29 -07004993 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00004994 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004995 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00004996 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07004997 }
4998
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004999 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5000 !(features & NETIF_F_IP_CSUM)) {
5001 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5002 features &= ~NETIF_F_TSO;
5003 features &= ~NETIF_F_TSO_ECN;
5004 }
5005
5006 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5007 !(features & NETIF_F_IPV6_CSUM)) {
5008 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5009 features &= ~NETIF_F_TSO6;
5010 }
5011
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005012 /* TSO ECN requires that TSO is present as well. */
5013 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5014 features &= ~NETIF_F_TSO_ECN;
5015
Michał Mirosław212b5732011-02-15 16:59:16 +00005016 /* Software GSO depends on SG. */
5017 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005018 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005019 features &= ~NETIF_F_GSO;
5020 }
5021
Michał Mirosławacd11302011-01-24 15:45:15 -08005022 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005023 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005024 /* maybe split UFO into V4 and V6? */
5025 if (!((features & NETIF_F_GEN_CSUM) ||
5026 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5027 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005028 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005029 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005030 features &= ~NETIF_F_UFO;
5031 }
5032
5033 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005034 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005035 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005036 features &= ~NETIF_F_UFO;
5037 }
5038 }
5039
5040 return features;
5041}
Herbert Xub63365a2008-10-23 01:11:29 -07005042
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005043int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005044{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005045 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005046 int err = 0;
5047
Michał Mirosław87267482011-04-12 09:56:38 +00005048 ASSERT_RTNL();
5049
Michał Mirosław5455c692011-02-15 16:59:17 +00005050 features = netdev_get_wanted_features(dev);
5051
5052 if (dev->netdev_ops->ndo_fix_features)
5053 features = dev->netdev_ops->ndo_fix_features(dev, features);
5054
5055 /* driver might be less strict about feature dependencies */
5056 features = netdev_fix_features(dev, features);
5057
5058 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005059 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005060
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005061 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5062 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005063
5064 if (dev->netdev_ops->ndo_set_features)
5065 err = dev->netdev_ops->ndo_set_features(dev, features);
5066
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005067 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005068 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005069 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5070 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005071 return -1;
5072 }
5073
5074 if (!err)
5075 dev->features = features;
5076
5077 return 1;
5078}
5079
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005080/**
5081 * netdev_update_features - recalculate device features
5082 * @dev: the device to check
5083 *
5084 * Recalculate the dev->features set and send notifications if it
5085 * has changed. Should be called after driver or hardware dependent
5086 * conditions that influence the features might have changed.
5087 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005088void netdev_update_features(struct net_device *dev)
5089{
5090 if (__netdev_update_features(dev))
5091 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005092}
5093EXPORT_SYMBOL(netdev_update_features);
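
/* Usage sketch (illustrative, not part of this file): a hypothetical
 * driver whose offload constraints depend on device state, e.g. the MTU,
 * can re-run the negotiation after that state changes; its
 * ndo_fix_features callback then masks out whatever the new state cannot
 * support. ndo_change_mtu is already called under RTNL, which
 * __netdev_update_features() asserts above:
 *
 *	static int example_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		dev->mtu = new_mtu;
 *		netdev_update_features(dev);
 *		return 0;
 *	}
 */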
5094
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005096 * netdev_change_features - recalculate device features
5097 * @dev: the device to check
5098 *
5099 * Recalculate the dev->features set and send notifications even
5100 * if it has not changed. Should be called instead of
5101 * netdev_update_features() if dev->vlan_features might also
5102 * have changed, so that the changes can be propagated to
5103 * stacked VLAN devices.
5104 */
5105void netdev_change_features(struct net_device *dev)
5106{
5107 __netdev_update_features(dev);
5108 netdev_features_change(dev);
5109}
5110EXPORT_SYMBOL(netdev_change_features);
5111
5112/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005113 * netif_stacked_transfer_operstate - transfer operstate
5114 * @rootdev: the root or lower level device to transfer state from
5115 * @dev: the device to transfer operstate to
5116 *
5117 * Transfer operational state from root to device. This is normally
5118 * called when a stacking relationship exists between the root
5119 * device and the device (a leaf device).
5120 */
5121void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5122 struct net_device *dev)
5123{
5124 if (rootdev->operstate == IF_OPER_DORMANT)
5125 netif_dormant_on(dev);
5126 else
5127 netif_dormant_off(dev);
5128
5129 if (netif_carrier_ok(rootdev)) {
5130 if (!netif_carrier_ok(dev))
5131 netif_carrier_on(dev);
5132 } else {
5133 if (netif_carrier_ok(dev))
5134 netif_carrier_off(dev);
5135 }
5136}
5137EXPORT_SYMBOL(netif_stacked_transfer_operstate);
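
/* Usage sketch (illustrative): stacking drivers typically call this from
 * their NETDEV_CHANGE notifier so a virtual upper device mirrors the
 * carrier and dormant state of the lower device it rides on
 * (hypothetical variable names):
 *
 *	case NETDEV_CHANGE:
 *		netif_stacked_transfer_operstate(lower_dev, upper_dev);
 *		break;
 */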
5138
Tom Herbertbf264142010-11-26 08:36:09 +00005139#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005140static int netif_alloc_rx_queues(struct net_device *dev)
5141{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005142 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005143 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005144
Tom Herbertbd25fa72010-10-18 18:00:16 +00005145 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005146
Tom Herbertbd25fa72010-10-18 18:00:16 +00005147 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005148 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005149 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005150
Tom Herbertbd25fa72010-10-18 18:00:16 +00005151 dev->_rx = rx;
5152
Tom Herbertbd25fa72010-10-18 18:00:16 +00005153 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005154 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005155 return 0;
5156}
Tom Herbertbf264142010-11-26 08:36:09 +00005157#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005158
Changli Gaoaa942102010-12-04 02:31:41 +00005159static void netdev_init_one_queue(struct net_device *dev,
5160 struct netdev_queue *queue, void *_unused)
5161{
5162 /* Initialize queue lock */
5163 spin_lock_init(&queue->_xmit_lock);
5164 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5165 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005166 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005167 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005168#ifdef CONFIG_BQL
5169 dql_init(&queue->dql, HZ);
5170#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005171}
5172
Tom Herberte6484932010-10-18 18:04:39 +00005173static int netif_alloc_netdev_queues(struct net_device *dev)
5174{
5175 unsigned int count = dev->num_tx_queues;
5176 struct netdev_queue *tx;
5177
5178 BUG_ON(count < 1);
5179
5180 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005181 if (!tx)
Tom Herberte6484932010-10-18 18:04:39 +00005182 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005183
Tom Herberte6484932010-10-18 18:04:39 +00005184 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005185
Tom Herberte6484932010-10-18 18:04:39 +00005186 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5187 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005188
5189 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005190}
5191
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005192/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193 * register_netdevice - register a network device
5194 * @dev: device to register
5195 *
5196 * Take a completed network device structure and add it to the kernel
5197 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5198 * chain. 0 is returned on success. A negative errno code is returned
5199 * on a failure to set up the device, or if the name is a duplicate.
5200 *
5201 * Callers must hold the rtnl semaphore. You may want
5202 * register_netdev() instead of this.
5203 *
5204 * BUGS:
5205 * The locking appears insufficient to guarantee two parallel registers
5206 * will not get the same name.
5207 */
5208
5209int register_netdevice(struct net_device *dev)
5210{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005212 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213
5214 BUG_ON(dev_boot_phase);
5215 ASSERT_RTNL();
5216
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005217 might_sleep();
5218
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219 /* When net_device's are persistent, this will be fatal. */
5220 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005221 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222
David S. Millerf1f28aa2008-07-15 00:08:33 -07005223 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005224 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 dev->iflink = -1;
5227
Gao feng828de4f2012-09-13 20:58:27 +00005228 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005229 if (ret < 0)
5230 goto out;
5231
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005233 if (dev->netdev_ops->ndo_init) {
5234 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235 if (ret) {
5236 if (ret > 0)
5237 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005238 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 }
5240 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005241
Patrick McHardyf6469682013-04-19 02:04:27 +00005242 if (((dev->hw_features | dev->features) &
5243 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005244 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5245 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5246 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5247 ret = -EINVAL;
5248 goto err_uninit;
5249 }
5250
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005251 ret = -EBUSY;
5252 if (!dev->ifindex)
5253 dev->ifindex = dev_new_index(net);
5254 else if (__dev_get_by_index(net, dev->ifindex))
5255 goto err_uninit;
5256
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257 if (dev->iflink == -1)
5258 dev->iflink = dev->ifindex;
5259
Michał Mirosław5455c692011-02-15 16:59:17 +00005260 /* Transfer changeable features to wanted_features and enable
5261 * software offloads (GSO and GRO).
5262 */
5263 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005264 dev->features |= NETIF_F_SOFT_FEATURES;
5265 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005267 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00005268 if (!(dev->flags & IFF_LOOPBACK)) {
5269 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5270 if (dev->features & NETIF_F_ALL_CSUM) {
5271 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5272 dev->features |= NETIF_F_NOCACHE_COPY;
5273 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005274 }
5275
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005276 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005277 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005278 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005279
Pravin B Shelaree579672013-03-07 09:28:08 +00005280 /* Make NETIF_F_SG inheritable to tunnel devices.
5281 */
5282 dev->hw_enc_features |= NETIF_F_SG;
5283
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005284 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5285 ret = notifier_to_errno(ret);
5286 if (ret)
5287 goto err_uninit;
5288
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005289 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005290 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005291 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005292 dev->reg_state = NETREG_REGISTERED;
5293
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005294 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005295
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296 /*
5297 * Default initial state at registry is that the
5298 * device is present.
5299 */
5300
5301 set_bit(__LINK_STATE_PRESENT, &dev->state);
5302
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005303 linkwatch_init_dev(dev);
5304
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005307 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005308 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309
Jiri Pirko948b3372013-01-08 01:38:25 +00005310	/* If the device has a permanent device address, the driver should
5311	 * set dev_addr, and addr_assign_type should be set to
5312	 * NET_ADDR_PERM (the default value).
5313 */
5314 if (dev->addr_assign_type == NET_ADDR_PERM)
5315 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5316
Linus Torvalds1da177e2005-04-16 15:20:36 -07005317	/* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005318 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005319 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005320 if (ret) {
5321 rollback_registered(dev);
5322 dev->reg_state = NETREG_UNREGISTERED;
5323 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005324 /*
5325 * Prevent userspace races by waiting until the network
5326	 * device is fully set up before sending notifications.
5327 */
Patrick McHardya2835762010-02-26 06:34:51 +00005328 if (!dev->rtnl_link_ops ||
5329 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5330 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331
5332out:
5333 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005334
5335err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005336 if (dev->netdev_ops->ndo_uninit)
5337 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005338 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005340EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341
5342/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005343 * init_dummy_netdev - init a dummy network device for NAPI
5344 * @dev: device to init
5345 *
5346 * This takes a network device structure and initializes the minimum
5347 * amount of fields so it can be used to schedule NAPI polls without
5348 * registering a full-blown interface. This is to be used by drivers
5349 * that need to tie several hardware interfaces to a single NAPI
5350 * poll scheduler due to HW limitations.
5351 */
5352int init_dummy_netdev(struct net_device *dev)
5353{
5354 /* Clear everything. Note we don't initialize spinlocks
5355	 * as they aren't supposed to be taken by any of the
5356	 * NAPI code and this dummy netdev is supposed to be
5357	 * only ever used for NAPI polls.
5358 */
5359 memset(dev, 0, sizeof(struct net_device));
5360
5361 /* make sure we BUG if trying to hit standard
5362 * register/unregister code path
5363 */
5364 dev->reg_state = NETREG_DUMMY;
5365
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005366 /* NAPI wants this */
5367 INIT_LIST_HEAD(&dev->napi_list);
5368
5369 /* a dummy interface is started by default */
5370 set_bit(__LINK_STATE_PRESENT, &dev->state);
5371 set_bit(__LINK_STATE_START, &dev->state);
5372
Eric Dumazet29b44332010-10-11 10:22:12 +00005373	/* Note: We don't allocate pcpu_refcnt for dummy devices,
5374	 * because users of this 'device' don't need to change
5375 * its refcount.
5376 */
5377
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005378 return 0;
5379}
5380EXPORT_SYMBOL_GPL(init_dummy_netdev);
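
/* Usage sketch (illustrative): a hypothetical driver with one interrupt
 * servicing several hardware ports can embed a dummy netdev purely to
 * anchor a NAPI context, without ever registering it:
 *
 *	struct example_hw {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	init_dummy_netdev(&hw->napi_dev);
 *	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
 *	napi_enable(&hw->napi);
 */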
5381
5382
5383/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384 * register_netdev - register a network device
5385 * @dev: device to register
5386 *
5387 * Take a completed network device structure and add it to the kernel
5388 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5389 * chain. 0 is returned on success. A negative errno code is returned
5390 * on a failure to set up the device, or if the name is a duplicate.
5391 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005392 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005393 * and expands the device name if you passed a format string to
5394 * alloc_netdev.
5395 */
5396int register_netdev(struct net_device *dev)
5397{
5398 int err;
5399
5400 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005401 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005402 rtnl_unlock();
5403 return err;
5404}
5405EXPORT_SYMBOL(register_netdev);
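
/* Usage sketch (illustrative): the usual probe-time pairing, with
 * hypothetical names; on registration failure the device must still be
 * released with free_netdev():
 *
 *	dev = alloc_netdev(sizeof(struct example_priv), "ex%d",
 *			   example_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */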
5406
Eric Dumazet29b44332010-10-11 10:22:12 +00005407int netdev_refcnt_read(const struct net_device *dev)
5408{
5409 int i, refcnt = 0;
5410
5411 for_each_possible_cpu(i)
5412 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5413 return refcnt;
5414}
5415EXPORT_SYMBOL(netdev_refcnt_read);
5416
Ben Hutchings2c530402012-07-10 10:55:09 +00005417/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005419 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005420 *
5421 * This is called when unregistering network devices.
5422 *
5423 * Any protocol or device that holds a reference should register
5424 * for netdevice notification, and clean up and put back the
5425 * reference if they receive an UNREGISTER event.
5426 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005427 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428 */
5429static void netdev_wait_allrefs(struct net_device *dev)
5430{
5431 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00005432 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433
Eric Dumazete014deb2009-11-17 05:59:21 +00005434 linkwatch_forget_dev(dev);
5435
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00005437 refcnt = netdev_refcnt_read(dev);
5438
5439 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005441 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442
5443 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005444 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445
Eric Dumazet748e2d92012-08-22 21:50:59 +00005446 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005447 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00005448 rtnl_lock();
5449
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005450 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005451 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5452 &dev->state)) {
5453 /* We must not have linkwatch events
5454 * pending on unregister. If this
5455 * happens, we simply run the queue
5456 * unscheduled, resulting in a noop
5457 * for this device.
5458 */
5459 linkwatch_run_queue();
5460 }
5461
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005462 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463
5464 rebroadcast_time = jiffies;
5465 }
5466
5467 msleep(250);
5468
Eric Dumazet29b44332010-10-11 10:22:12 +00005469 refcnt = netdev_refcnt_read(dev);
5470
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005472 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5473 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005474 warning_time = jiffies;
5475 }
5476 }
5477}
5478
5479/* The sequence is:
5480 *
5481 * rtnl_lock();
5482 * ...
5483 * register_netdevice(x1);
5484 * register_netdevice(x2);
5485 * ...
5486 * unregister_netdevice(y1);
5487 * unregister_netdevice(y2);
5488 * ...
5489 * rtnl_unlock();
5490 * free_netdev(y1);
5491 * free_netdev(y2);
5492 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005493 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005494 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005495 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005496 * without deadlocking with linkwatch via keventd.
5497 * 2) Since we run with the RTNL semaphore not held, we can sleep
5498 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005499 *
5500 * We must not return until all unregister events added during
5501 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005502 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005503void netdev_run_todo(void)
5504{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005505 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506
Linus Torvalds1da177e2005-04-16 15:20:36 -07005507 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005508 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005509
5510 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005511
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005512
5513 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00005514 if (!list_empty(&list))
5515 rcu_barrier();
5516
Linus Torvalds1da177e2005-04-16 15:20:36 -07005517 while (!list_empty(&list)) {
5518 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005519 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520 list_del(&dev->todo_list);
5521
Eric Dumazet748e2d92012-08-22 21:50:59 +00005522 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005523 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00005524 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005525
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005526 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005527 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005529 dump_stack();
5530 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005531 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005532
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005533 dev->reg_state = NETREG_UNREGISTERED;
5534
Changli Gao152102c2010-03-30 20:16:22 +00005535 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005536
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005537 netdev_wait_allrefs(dev);
5538
5539 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00005540 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00005541 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5542 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005543 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005544
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005545 if (dev->destructor)
5546 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005547
5548 /* Free network device */
5549 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005551}
5552
Ben Hutchings3cfde792010-07-09 09:11:52 +00005553/* Convert net_device_stats to rtnl_link_stats64. They have the same
5554 * fields in the same order, with only the type differing.
5555 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005556void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5557 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00005558{
5559#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005560 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5561 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00005562#else
5563 size_t i, n = sizeof(*stats64) / sizeof(u64);
5564 const unsigned long *src = (const unsigned long *)netdev_stats;
5565 u64 *dst = (u64 *)stats64;
5566
5567 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5568 sizeof(*stats64) / sizeof(u64));
5569 for (i = 0; i < n; i++)
5570 dst[i] = src[i];
5571#endif
5572}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005573EXPORT_SYMBOL(netdev_stats_to_stats64);
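
/* Usage sketch (illustrative): a hypothetical driver that still keeps
 * its counters in dev->stats can implement ndo_get_stats64 as a thin
 * conversion:
 *
 *	static struct rtnl_link_stats64 *
 *	example_get_stats64(struct net_device *dev,
 *			    struct rtnl_link_stats64 *storage)
 *	{
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *		return storage;
 *	}
 */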
Ben Hutchings3cfde792010-07-09 09:11:52 +00005574
Eric Dumazetd83345a2009-11-16 03:36:51 +00005575/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005576 * dev_get_stats - get network device statistics
5577 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07005578 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005579 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00005580 * Get network statistics from device. Return @storage.
5581 * The device driver may provide its own method by setting
5582 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5583 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005584 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00005585struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5586 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005587{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005588 const struct net_device_ops *ops = dev->netdev_ops;
5589
Eric Dumazet28172732010-07-07 14:58:56 -07005590 if (ops->ndo_get_stats64) {
5591 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005592 ops->ndo_get_stats64(dev, storage);
5593 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00005594 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005595 } else {
5596 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07005597 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005598 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07005599 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07005600}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005601EXPORT_SYMBOL(dev_get_stats);
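
/* Usage sketch (illustrative): callers supply scratch storage, so the
 * result can be read without holding any stats lock:
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 *
 *	pr_info("%s: %llu rx packets\n", dev->name,
 *		(unsigned long long)stats->rx_packets);
 */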
Rusty Russellc45d2862007-03-28 14:29:08 -07005602
Eric Dumazet24824a02010-10-02 06:11:55 +00005603struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07005604{
Eric Dumazet24824a02010-10-02 06:11:55 +00005605 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07005606
Eric Dumazet24824a02010-10-02 06:11:55 +00005607#ifdef CONFIG_NET_CLS_ACT
5608 if (queue)
5609 return queue;
5610 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5611 if (!queue)
5612 return NULL;
5613 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00005614 queue->qdisc = &noop_qdisc;
5615 queue->qdisc_sleeping = &noop_qdisc;
5616 rcu_assign_pointer(dev->ingress_queue, queue);
5617#endif
5618 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07005619}
5620
Eric Dumazet2c60db02012-09-16 09:17:26 +00005621static const struct ethtool_ops default_ethtool_ops;
5622
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00005623void netdev_set_default_ethtool_ops(struct net_device *dev,
5624 const struct ethtool_ops *ops)
5625{
5626 if (dev->ethtool_ops == &default_ethtool_ops)
5627 dev->ethtool_ops = ops;
5628}
5629EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
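
/* Usage sketch (illustrative): a bus or class layer (hypothetical ops
 * name here) can install fallback ethtool ops without clobbering a
 * driver's own assignment; if the driver already set dev->ethtool_ops,
 * the call is a no-op:
 *
 *	netdev_set_default_ethtool_ops(dev, &example_class_ethtool_ops);
 */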
5630
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631/**
Tom Herbert36909ea2011-01-09 19:36:31 +00005632 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633 * @sizeof_priv: size of private data to allocate space for
5634 * @name: device name format string
5635 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00005636 * @txqs: the number of TX subqueues to allocate
5637 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005638 *
5639 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005640 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00005641 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642 */
Tom Herbert36909ea2011-01-09 19:36:31 +00005643struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5644 void (*setup)(struct net_device *),
5645 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005648 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005649 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005650
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005651 BUG_ON(strlen(name) >= sizeof(dev->name));
5652
Tom Herbert36909ea2011-01-09 19:36:31 +00005653 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005654 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00005655 return NULL;
5656 }
5657
Tom Herbert36909ea2011-01-09 19:36:31 +00005658#ifdef CONFIG_RPS
5659 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005660 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00005661 return NULL;
5662 }
5663#endif
5664
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005665 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005666 if (sizeof_priv) {
5667 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005668 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005669 alloc_size += sizeof_priv;
5670 }
5671 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005672 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005674 p = kzalloc(alloc_size, GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005675 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005676 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005678 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005679 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005680
Eric Dumazet29b44332010-10-11 10:22:12 +00005681 dev->pcpu_refcnt = alloc_percpu(int);
5682 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00005683 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005684
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00005686 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005687
Jiri Pirko22bedad32010-04-01 21:22:57 +00005688 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005689 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00005690
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005691 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005692
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005693 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00005694 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005695
Herbert Xud565b0a2008-12-15 23:38:52 -08005696 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005697 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005698 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005699 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005700 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005701 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005702
5703 dev->num_tx_queues = txqs;
5704 dev->real_num_tx_queues = txqs;
5705 if (netif_alloc_netdev_queues(dev))
5706 goto free_all;
5707
5708#ifdef CONFIG_RPS
5709 dev->num_rx_queues = rxqs;
5710 dev->real_num_rx_queues = rxqs;
5711 if (netif_alloc_rx_queues(dev))
5712 goto free_all;
5713#endif
5714
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005716 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00005717 if (!dev->ethtool_ops)
5718 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005720
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005721free_all:
5722 free_netdev(dev);
5723 return NULL;
5724
Eric Dumazet29b44332010-10-11 10:22:12 +00005725free_pcpu:
5726 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00005727 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00005728#ifdef CONFIG_RPS
5729 kfree(dev->_rx);
5730#endif
5731
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005732free_p:
5733 kfree(p);
5734 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005735}
Tom Herbert36909ea2011-01-09 19:36:31 +00005736EXPORT_SYMBOL(alloc_netdev_mqs);
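
/* Usage sketch (illustrative): a hypothetical multiqueue Ethernet driver
 * asking for eight TX and eight RX queues; ether_setup() fills in the
 * generic Ethernet fields:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct example_priv), "eth%d",
 *			       ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */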
Linus Torvalds1da177e2005-04-16 15:20:36 -07005737
5738/**
5739 * free_netdev - free network device
5740 * @dev: device
5741 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005742 * This function does the last stage of destroying an allocated device
5743 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744 * If this is the last reference then it will be freed.
5745 */
5746void free_netdev(struct net_device *dev)
5747{
Herbert Xud565b0a2008-12-15 23:38:52 -08005748 struct napi_struct *p, *n;
5749
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005750 release_net(dev_net(dev));
5751
David S. Millere8a04642008-07-17 00:34:19 -07005752 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00005753#ifdef CONFIG_RPS
5754 kfree(dev->_rx);
5755#endif
David S. Millere8a04642008-07-17 00:34:19 -07005756
Eric Dumazet33d480c2011-08-11 19:30:52 +00005757 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00005758
Jiri Pirkof001fde2009-05-05 02:48:28 +00005759 /* Flush device addresses */
5760 dev_addr_flush(dev);
5761
Herbert Xud565b0a2008-12-15 23:38:52 -08005762 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5763 netif_napi_del(p);
5764
Eric Dumazet29b44332010-10-11 10:22:12 +00005765 free_percpu(dev->pcpu_refcnt);
5766 dev->pcpu_refcnt = NULL;
5767
Stephen Hemminger3041a062006-05-26 13:25:24 -07005768 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769 if (dev->reg_state == NETREG_UNINITIALIZED) {
5770 kfree((char *)dev - dev->padded);
5771 return;
5772 }
5773
5774 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5775 dev->reg_state = NETREG_RELEASED;
5776
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005777 /* will free via device release */
5778 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005779}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005780EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005781
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005782/**
5783 * synchronize_net - Synchronize with packet receive processing
5784 *
5785 * Wait for packets currently being received to be done.
5786 * Does not block later packets from starting.
5787 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005788void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005789{
5790 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00005791 if (rtnl_is_locked())
5792 synchronize_rcu_expedited();
5793 else
5794 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005795}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005796EXPORT_SYMBOL(synchronize_net);
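
/* Usage sketch (illustrative): the classic unpublish-wait-free pattern,
 * with a hypothetical RCU-protected list entry:
 *
 *	list_del_rcu(&entry->list);
 *	synchronize_net();
 *	kfree(entry);
 */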
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797
5798/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005799 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005800 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005801 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005802 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005803 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005804 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005805 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005806 *
5807 * Callers must hold the rtnl semaphore. You may want
5808 * unregister_netdev() instead of this.
5809 */
5810
Eric Dumazet44a08732009-10-27 07:03:04 +00005811void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005812{
Herbert Xua6620712007-12-12 19:21:56 -08005813 ASSERT_RTNL();
5814
Eric Dumazet44a08732009-10-27 07:03:04 +00005815 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005816 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005817 } else {
5818 rollback_registered(dev);
5819 /* Finish processing unregister after unlock */
5820 net_set_todo(dev);
5821 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005822}
Eric Dumazet44a08732009-10-27 07:03:04 +00005823EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005824
5825/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005826 * unregister_netdevice_many - unregister many devices
5827 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005828 */
5829void unregister_netdevice_many(struct list_head *head)
5830{
5831 struct net_device *dev;
5832
5833 if (!list_empty(head)) {
5834 rollback_registered_many(head);
5835 list_for_each_entry(dev, head, unreg_list)
5836 net_set_todo(dev);
5837 }
5838}
Eric Dumazet63c80992009-10-27 07:06:49 +00005839EXPORT_SYMBOL(unregister_netdevice_many);
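
/* Usage sketch (illustrative): batching several unregistrations under a
 * single RTNL hold amortizes the RCU and notifier synchronization cost;
 * default_device_exit_batch() below uses the same pattern:
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */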
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005840
5841/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005842 * unregister_netdev - remove device from the kernel
5843 * @dev: device
5844 *
5845 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005846 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005847 *
5848 * This is just a wrapper for unregister_netdevice that takes
5849 * the rtnl semaphore. In general you want to use this and not
5850 * unregister_netdevice.
5851 */
5852void unregister_netdev(struct net_device *dev)
5853{
5854 rtnl_lock();
5855 unregister_netdevice(dev);
5856 rtnl_unlock();
5857}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005858EXPORT_SYMBOL(unregister_netdev);
5859
Eric W. Biedermance286d32007-09-12 13:53:49 +02005860/**
5861 * dev_change_net_namespace - move device to a different network namespace
5862 * @dev: device
5863 * @net: network namespace
5864 * @pat: If not NULL name pattern to try if the current device name
5865 * is already taken in the destination network namespace.
5866 *
5867 * This function shuts down a device interface and moves it
5868 * to a new network namespace. On success 0 is returned, on
5869 * a failure a negative errno code is returned.
5870 *
5871 * Callers must hold the rtnl semaphore.
5872 */
5873
5874int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5875{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005876 int err;
5877
5878 ASSERT_RTNL();
5879
5880 /* Don't allow namespace local devices to be moved. */
5881 err = -EINVAL;
5882 if (dev->features & NETIF_F_NETNS_LOCAL)
5883 goto out;
5884
5885	/* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02005886 if (dev->reg_state != NETREG_REGISTERED)
5887 goto out;
5888
5889	/* Get out if there is nothing to do */
5890 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005891 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005892 goto out;
5893
5894 /* Pick the destination device name, and ensure
5895 * we can use it in the destination network namespace.
5896 */
5897 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005898 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005899 /* We get here if we can't use the current device name */
5900 if (!pat)
5901 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00005902 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005903 goto out;
5904 }
5905
5906 /*
5907 * And now a mini version of register_netdevice/unregister_netdevice.
5908 */
5909
5910 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005911 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005912
5913 /* And unlink it from device chain */
5914 err = -ENODEV;
5915 unlist_netdevice(dev);
5916
5917 synchronize_net();
5918
5919 /* Shutdown queueing discipline. */
5920 dev_shutdown(dev);
5921
5922	/* Notify protocols that we are about to destroy
5923 this device. They should clean all the things.
David Lamparter3b27e102010-09-17 03:22:19 +00005924
5925 Note that dev->reg_state stays at NETREG_REGISTERED.
5926 This is wanted because this way 8021q and macvlan know
5927 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02005928 */
5929 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00005930 rcu_barrier();
5931 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00005932 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005933
5934 /*
5935 * Flush the unicast and multicast chains
5936 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005937 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005938 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005939
Serge Hallyn4e66ae22012-12-03 16:17:12 +00005940 /* Send a netdev-removed uevent to the old namespace */
5941 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
5942
Eric W. Biedermance286d32007-09-12 13:53:49 +02005943 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005944 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005945
Eric W. Biedermance286d32007-09-12 13:53:49 +02005946 /* If there is an ifindex conflict assign a new one */
5947 if (__dev_get_by_index(net, dev->ifindex)) {
5948 int iflink = (dev->iflink == dev->ifindex);
5949 dev->ifindex = dev_new_index(net);
5950 if (iflink)
5951 dev->iflink = dev->ifindex;
5952 }
5953
Serge Hallyn4e66ae22012-12-03 16:17:12 +00005954 /* Send a netdev-add uevent to the new namespace */
5955 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
5956
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005957 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07005958 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005959 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005960
5961 /* Add the device back in the hashes */
5962 list_netdevice(dev);
5963
5964	/* Notify protocols that a new device appeared. */
5965 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5966
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005967 /*
5968 * Prevent userspace races by waiting until the network
5969 * device is fully setup before sending notifications.
5970	 * device is fully set up before sending notifications.
5971 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5972
Eric W. Biedermance286d32007-09-12 13:53:49 +02005973 synchronize_net();
5974 err = 0;
5975out:
5976 return err;
5977}
Johannes Berg463d0182009-07-14 00:33:35 +02005978EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005979
Linus Torvalds1da177e2005-04-16 15:20:36 -07005980static int dev_cpu_callback(struct notifier_block *nfb,
5981 unsigned long action,
5982 void *ocpu)
5983{
5984 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005985 struct sk_buff *skb;
5986 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5987 struct softnet_data *sd, *oldsd;
5988
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005989 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005990 return NOTIFY_OK;
5991
5992 local_irq_disable();
5993 cpu = smp_processor_id();
5994 sd = &per_cpu(softnet_data, cpu);
5995 oldsd = &per_cpu(softnet_data, oldcpu);
5996
5997 /* Find end of our completion_queue. */
5998 list_skb = &sd->completion_queue;
5999 while (*list_skb)
6000 list_skb = &(*list_skb)->next;
6001 /* Append completion queue from offline CPU. */
6002 *list_skb = oldsd->completion_queue;
6003 oldsd->completion_queue = NULL;
6004
Linus Torvalds1da177e2005-04-16 15:20:36 -07006005 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006006 if (oldsd->output_queue) {
6007 *sd->output_queue_tailp = oldsd->output_queue;
6008 sd->output_queue_tailp = oldsd->output_queue_tailp;
6009 oldsd->output_queue = NULL;
6010 oldsd->output_queue_tailp = &oldsd->output_queue;
6011 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006012 /* Append NAPI poll list from offline CPU. */
6013 if (!list_empty(&oldsd->poll_list)) {
6014 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6015 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6016 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006017
6018 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6019 local_irq_enable();
6020
6021 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006022 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6023 netif_rx(skb);
6024 input_queue_head_incr(oldsd);
6025 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006026 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006027 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006028 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006029 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006030
6031 return NOTIFY_OK;
6032}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006033
6034
Herbert Xu7f353bf2007-08-10 15:47:58 -07006035/**
Herbert Xub63365a2008-10-23 01:11:29 -07006036 * netdev_increment_features - increment feature set by one
6037 * @all: current feature set
6038 * @one: new feature set
6039 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006040 *
6041 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006042 * @one to the master device with current feature set @all. Will not
6043 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006044 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006045netdev_features_t netdev_increment_features(netdev_features_t all,
6046 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006047{
Michał Mirosław1742f182011-04-22 06:31:16 +00006048 if (mask & NETIF_F_GEN_CSUM)
6049 mask |= NETIF_F_ALL_CSUM;
6050 mask |= NETIF_F_VLAN_CHALLENGED;
6051
6052 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6053 all &= one | ~NETIF_F_ALL_FOR_ALL;
6054
Michał Mirosław1742f182011-04-22 06:31:16 +00006055 /* If one device supports hw checksumming, set for all. */
6056 if (all & NETIF_F_GEN_CSUM)
6057 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006058
6059 return all;
6060}
Herbert Xub63365a2008-10-23 01:11:29 -07006061EXPORT_SYMBOL(netdev_increment_features);
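
/* Usage sketch (illustrative): an aggregating driver (bonding does this
 * for its vlan_features) folds each slave's feature set into a running
 * total; the names here are hypothetical:
 *
 *	netdev_features_t features = example_mask;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     example_mask);
 */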
Herbert Xu7f353bf2007-08-10 15:47:58 -07006062
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006063static struct hlist_head *netdev_create_hash(void)
6064{
6065 int i;
6066 struct hlist_head *hash;
6067
6068 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6069 if (hash != NULL)
6070 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6071 INIT_HLIST_HEAD(&hash[i]);
6072
6073 return hash;
6074}
6075
Eric W. Biederman881d9662007-09-17 11:56:21 -07006076/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006077static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006078{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006079 if (net != &init_net)
6080 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006081
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006082 net->dev_name_head = netdev_create_hash();
6083 if (net->dev_name_head == NULL)
6084 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006085
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006086 net->dev_index_head = netdev_create_hash();
6087 if (net->dev_index_head == NULL)
6088 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006089
6090 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006091
6092err_idx:
6093 kfree(net->dev_name_head);
6094err_name:
6095 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006096}
6097
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006098/**
6099 * netdev_drivername - network driver for the device
6100 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006101 *
6102 * Determine the network driver for the device.
6103 */
David S. Miller3019de12011-06-06 16:41:33 -07006104const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006105{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006106 const struct device_driver *driver;
6107 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006108 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006109
6110 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006111 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006112 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006113
6114 driver = parent->driver;
6115 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006116 return driver->name;
6117 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006118}
6119
Joe Perchesb004ff42012-09-12 20:12:19 -07006120static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006121 struct va_format *vaf)
6122{
6123 int r;
6124
Joe Perchesb004ff42012-09-12 20:12:19 -07006125 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006126 r = dev_printk_emit(level[1] - '0',
6127 dev->dev.parent,
6128 "%s %s %s: %pV",
6129 dev_driver_string(dev->dev.parent),
6130 dev_name(dev->dev.parent),
6131 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006132 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00006133 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006134 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006135 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006136 }
Joe Perches256df2f2010-06-27 01:02:35 +00006137
6138 return r;
6139}
6140
6141int netdev_printk(const char *level, const struct net_device *dev,
6142 const char *format, ...)
6143{
6144 struct va_format vaf;
6145 va_list args;
6146 int r;
6147
6148 va_start(args, format);
6149
6150 vaf.fmt = format;
6151 vaf.va = &args;
6152
6153 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006154
Joe Perches256df2f2010-06-27 01:02:35 +00006155 va_end(args);
6156
6157 return r;
6158}
6159EXPORT_SYMBOL(netdev_printk);
6160
6161#define define_netdev_printk_level(func, level) \
6162int func(const struct net_device *dev, const char *fmt, ...) \
6163{ \
6164 int r; \
6165 struct va_format vaf; \
6166 va_list args; \
6167 \
6168 va_start(args, fmt); \
6169 \
6170 vaf.fmt = fmt; \
6171 vaf.va = &args; \
6172 \
6173 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07006174 \
Joe Perches256df2f2010-06-27 01:02:35 +00006175 va_end(args); \
6176 \
6177 return r; \
6178} \
6179EXPORT_SYMBOL(func);
6180
6181define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6182define_netdev_printk_level(netdev_alert, KERN_ALERT);
6183define_netdev_printk_level(netdev_crit, KERN_CRIT);
6184define_netdev_printk_level(netdev_err, KERN_ERR);
6185define_netdev_printk_level(netdev_warn, KERN_WARNING);
6186define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6187define_netdev_printk_level(netdev_info, KERN_INFO);
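
/* Usage sketch (illustrative): these helpers prefix the message with the
 * driver and device name, keeping call sites short (nqueues is a
 * hypothetical local variable):
 *
 *	netdev_warn(dev, "link lost, resetting\n");
 *	netdev_info(dev, "%u queues online\n", nqueues);
 */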
6188
Pavel Emelyanov46650792007-10-08 20:38:39 -07006189static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006190{
6191 kfree(net->dev_name_head);
6192 kfree(net->dev_index_head);
6193}
6194
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006195static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07006196 .init = netdev_init,
6197 .exit = netdev_exit,
6198};
6199
Pavel Emelyanov46650792007-10-08 20:38:39 -07006200static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006201{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006202 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02006203 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006204 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02006205 * initial network namespace
6206 */
6207 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006208 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006209 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006210 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02006211
6212 /* Ignore unmoveable devices (i.e. loopback) */
6213 if (dev->features & NETIF_F_NETNS_LOCAL)
6214 continue;
6215
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006216 /* Leave virtual devices for the generic cleanup */
6217 if (dev->rtnl_link_ops)
6218 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08006219
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006220 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006221 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6222 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006223 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006224 pr_emerg("%s: failed to move %s to init_net: %d\n",
6225 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006226 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02006227 }
6228 }
6229 rtnl_unlock();
6230}
6231
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006232static void __net_exit default_device_exit_batch(struct list_head *net_list)
6233{
6234	/* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04006235 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006236 * Do this across as many network namespaces as possible to
6237 * improve batching efficiency.
6238 */
6239 struct net_device *dev;
6240 struct net *net;
6241 LIST_HEAD(dev_kill_list);
6242
6243 rtnl_lock();
6244 list_for_each_entry(net, net_list, exit_list) {
6245 for_each_netdev_reverse(net, dev) {
6246 if (dev->rtnl_link_ops)
6247 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6248 else
6249 unregister_netdevice_queue(dev, &dev_kill_list);
6250 }
6251 }
6252 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006253 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006254 rtnl_unlock();
6255}
6256
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006257static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006258 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006259 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02006260};
6261
Linus Torvalds1da177e2005-04-16 15:20:36 -07006262/*
6263 * Initialize the DEV module. At boot time this walks the device list and
6264 * unhooks any devices that fail to initialise (normally hardware not
6265 * present) and leaves us with a valid list of present and active devices.
6266 *
6267 */
6268
6269/*
6270 * This is called single threaded during boot, so no need
6271 * to take the rtnl semaphore.
6272 */
6273static int __init net_dev_init(void)
6274{
6275 int i, rc = -ENOMEM;
6276
6277 BUG_ON(!dev_boot_phase);
6278
Linus Torvalds1da177e2005-04-16 15:20:36 -07006279 if (dev_proc_init())
6280 goto out;
6281
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006282 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006283 goto out;
6284
6285 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08006286 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006287 INIT_LIST_HEAD(&ptype_base[i]);
6288
Vlad Yasevich62532da2012-11-15 08:49:10 +00006289 INIT_LIST_HEAD(&offload_base);
6290
Eric W. Biederman881d9662007-09-17 11:56:21 -07006291 if (register_pernet_subsys(&netdev_net_ops))
6292 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006293
6294 /*
6295 * Initialise the packet receive queues.
6296 */
6297
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07006298 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006299 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006300
Changli Gaodee42872010-05-02 05:42:16 +00006301 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006302 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07006303 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006304 sd->completion_queue = NULL;
6305 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00006306 sd->output_queue = NULL;
6307 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00006308#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006309 sd->csd.func = rps_trigger_softirq;
6310 sd->csd.info = sd;
6311 sd->csd.flags = 0;
6312 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07006313#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00006314
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006315 sd->backlog.poll = process_backlog;
6316 sd->backlog.weight = weight_p;
6317 sd->backlog.gro_list = NULL;
6318 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006319 }
6320
Linus Torvalds1da177e2005-04-16 15:20:36 -07006321 dev_boot_phase = 0;
6322
Eric W. Biederman505d4f72008-11-07 22:54:20 -08006323	/* The loopback device is special: if any other network device
6324	 * is present in a network namespace, the loopback device
6325	 * must be present too. Since we now dynamically allocate and
6326	 * free the loopback device, ensure this invariant is
6327	 * maintained by keeping the loopback device as the first
6328	 * device on the list of network devices: it is the first
6329	 * device that appears and the last network device
6330	 * that disappears.
6331 */
6332 if (register_pernet_device(&loopback_net_ops))
6333 goto out;
6334
6335 if (register_pernet_device(&default_device_ops))
6336 goto out;
6337
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006338 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6339 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006340
6341 hotcpu_notifier(dev_cpu_callback, 0);
6342 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006343 rc = 0;
6344out:
6345 return rc;
6346}
6347
6348subsys_initcall(net_dev_init);