/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * For example usages, see register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
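
/*
 * Usage sketch (illustrative, not part of the original file): the two pure
 * reader patterns described above.  do_something() is a hypothetical
 * callback; the locks and the for_each_netdev*() iterators are real.
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		do_something(dev);
 *	read_unlock(&dev_base_lock);
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		do_something(dev);
 *	rcu_read_unlock();
 */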

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the packet
 *	and subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
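
/*
 * Usage sketch (illustrative, not part of the original file): registering
 * a tap for all packets, the way a diagnostic module does.  example_rcv
 * and example_tap are hypothetical names; the &packet_type fields and the
 * dev_add_pack()/dev_remove_pack() calls are real.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			(inspect, then consume)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	(lands on the ptype_all chain)
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_tap);		(module init)
 *	dev_remove_pack(&example_tap);		(module exit)
 */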

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
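
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * protocol such as IPv4 registers its GRO/GSO callbacks.  The callback
 * function names are hypothetical; the &packet_offload layout and
 * dev_add_offload() are real.
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment	= example_gso_segment,
 *			.gro_receive	= example_gro_receive,
 *			.gro_complete	= example_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&example_offload);
 */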

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
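
/*
 * Example (sketch): with the parsing above, a kernel command line entry
 * such as
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * records irq 9 and base_addr 0x300 under the name "eth0"; a legacy
 * driver can later fetch these via netdev_boot_setup_check().  The
 * concrete values here are illustrative.
 */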

/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
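
/*
 * Usage sketch (illustrative, not part of the original file): a refcounted
 * lookup paired with dev_put(), as the kernel-doc above requires.  The
 * namespace and name are examples.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		(use dev; the reference keeps it from being freed)
 *		dev_put(dev);
 *	}
 */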

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
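
/*
 * Usage sketch (illustrative): the RCU variant avoids the refcount when
 * the device is only touched inside the read-side critical section.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		(use dev here only; no sleeping, no keeping the pointer)
 *	rcu_read_unlock();
 */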

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
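
/*
 * Usage sketch (illustrative): looking up an Ethernet device by MAC.
 * The address bytes are examples; any use of the result must stay inside
 * the RCU read-side section since no reference is taken.
 *
 *	static const char mac[ETH_ALEN] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55
 *	};
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		(use dev)
 *	rcu_read_unlock();
 */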

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
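
/*
 * Examples (these follow directly from the checks above):
 *
 *	dev_valid_name("eth0")   -> true
 *	dev_valid_name("")       -> false  (empty)
 *	dev_valid_name("..")     -> false  (reserved path component)
 *	dev_valid_name("my dev") -> false  (whitespace)
 *	dev_valid_name("a/b")    -> false  (slash)
 */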

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
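
/*
 * Example (sketch): if "eth0" and "eth2" are already registered in the
 * namespace (a hypothetical state), the "%d" scan above marks slots 0
 * and 2 in use, so
 *
 *	dev_alloc_name(dev, "eth%d");
 *
 * fills dev->name with "eth1" and returns 1.
 */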

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; a format string such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
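
/*
 * Usage sketch (illustrative): a rename must run under RTNL with the
 * device down, per the ASSERT_RTNL() and IFF_UP checks above.  The name
 * is an example.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "lan%d");	(a literal name works too)
 *	rtnl_unlock();
 */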

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
1139
1140/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001141 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001142 * @dev: device to cause notification
1143 *
1144 * Called to indicate a device has changed features.
1145 */
1146void netdev_features_change(struct net_device *dev)
1147{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001148 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001149}
1150EXPORT_SYMBOL(netdev_features_change);
1151
1152/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 * netdev_state_change - device changes state
1154 * @dev: device to cause notification
1155 *
1156 * Called to indicate a device has changed state. This function calls
1157 * the notifier chains for netdev_chain and sends a NEWLINK message
1158 * to the routing socket.
1159 */
1160void netdev_state_change(struct net_device *dev)
1161{
1162 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001163 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1165 }
1166}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001167EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
Amerigo Wangee89bab2012-08-09 22:14:56 +00001169/**
1170 * netdev_notify_peers - notify network peers about existence of @dev
1171 * @dev: network device
1172 *
1173 * Generate traffic such that interested network peers are aware of
1174 * @dev, such as by generating a gratuitous ARP. This may be used when
1175 * a device wants to inform the rest of the network about some sort of
1176 * reconfiguration such as a failover event or virtual machine
1177 * migration.
1178 */
1179void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001180{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001181 rtnl_lock();
1182 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1183 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001184}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001185EXPORT_SYMBOL(netdev_notify_peers);
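
/*
 * Usage sketch (illustrative): a hypervisor-facing driver might call this
 * after a virtual machine migration so that switches relearn which port
 * the MAC lives behind.  Note the function takes RTNL itself, so it must
 * be called without RTNL held.
 *
 *	netdev_notify_peers(dev);
 */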
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001186
Patrick McHardybd380812010-02-26 06:34:53 +00001187static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001189 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001190 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001192 ASSERT_RTNL();
1193
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 if (!netif_device_present(dev))
1195 return -ENODEV;
1196
Neil Hormanca99ca12013-02-05 08:05:43 +00001197 /* Block netpoll from trying to do any rx path servicing.
1198 * If we don't do this there is a chance ndo_poll_controller
1199 * or ndo_poll may be running while we open the device
1200 */
1201 ret = netpoll_rx_disable(dev);
1202 if (ret)
1203 return ret;
1204
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001205 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1206 ret = notifier_to_errno(ret);
1207 if (ret)
1208 return ret;
1209
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001211
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001212 if (ops->ndo_validate_addr)
1213 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001214
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001215 if (!ret && ops->ndo_open)
1216 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
Neil Hormanca99ca12013-02-05 08:05:43 +00001218 netpoll_rx_enable(dev);
1219
Jeff Garzikbada3392007-10-23 20:19:37 -07001220 if (ret)
1221 clear_bit(__LINK_STATE_START, &dev->state);
1222 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001224 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001225 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001227 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001229
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 return ret;
1231}
Patrick McHardybd380812010-02-26 06:34:53 +00001232
1233/**
1234 * dev_open - prepare an interface for use.
1235 * @dev: device to open
1236 *
1237 * Takes a device from down to up state. The device's private open
1238 * function is invoked and then the multicast lists are loaded. Finally
1239 * the device is moved into the up state and a %NETDEV_UP message is
1240 * sent to the netdev notifier chain.
1241 *
1242 * Calling this function on an active interface is a nop. On a failure
1243 * a negative errno code is returned.
1244 */
1245int dev_open(struct net_device *dev)
1246{
1247 int ret;
1248
Patrick McHardybd380812010-02-26 06:34:53 +00001249 if (dev->flags & IFF_UP)
1250 return 0;
1251
Patrick McHardybd380812010-02-26 06:34:53 +00001252 ret = __dev_open(dev);
1253 if (ret < 0)
1254 return ret;
1255
Patrick McHardybd380812010-02-26 06:34:53 +00001256 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1257 call_netdevice_notifiers(NETDEV_UP, dev);
1258
1259 return ret;
1260}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001261EXPORT_SYMBOL(dev_open);
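/* Example (illustrative sketch): dev_open() must be called under the
 * rtnl lock, as __dev_open() asserts above:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *	if (err < 0)
 *		pr_err("%s: failed to bring up: %d\n", dev->name, err);
 */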
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262
Octavian Purdila44345722010-12-13 12:44:07 +00001263static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264{
Octavian Purdila44345722010-12-13 12:44:07 +00001265 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001266
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001267 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001268 might_sleep();
1269
Octavian Purdila44345722010-12-13 12:44:07 +00001270 list_for_each_entry(dev, head, unreg_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001271 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Octavian Purdila44345722010-12-13 12:44:07 +00001273 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
Octavian Purdila44345722010-12-13 12:44:07 +00001275		/* Synchronize to scheduled poll. We cannot touch the poll list, it
 1276		 * may even be on a different cpu. So just clear netif_running().
1277 *
 1278		 * dev->stop() will invoke napi_disable() on all of its
1279 * napi_struct instances on this device.
1280 */
1281 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1282 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283
Octavian Purdila44345722010-12-13 12:44:07 +00001284 dev_deactivate_many(head);
1285
1286 list_for_each_entry(dev, head, unreg_list) {
1287 const struct net_device_ops *ops = dev->netdev_ops;
1288
1289 /*
 1290		 *	Call the device-specific close. This cannot fail
 1291		 *	and is only called if the device is UP.
1292 *
1293 * We allow it to be called even after a DETACH hot-plug
1294 * event.
1295 */
1296 if (ops->ndo_stop)
1297 ops->ndo_stop(dev);
1298
Octavian Purdila44345722010-12-13 12:44:07 +00001299 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001300 net_dmaengine_put();
1301 }
1302
1303 return 0;
1304}
1305
1306static int __dev_close(struct net_device *dev)
1307{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001308 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001309 LIST_HEAD(single);
1310
Neil Hormanca99ca12013-02-05 08:05:43 +00001311 /* Temporarily disable netpoll until the interface is down */
1312 retval = netpoll_rx_disable(dev);
1313 if (retval)
1314 return retval;
1315
Octavian Purdila44345722010-12-13 12:44:07 +00001316 list_add(&dev->unreg_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001317 retval = __dev_close_many(&single);
1318 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001319
1320 netpoll_rx_enable(dev);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001321 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001322}
1323
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001324static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001325{
1326 struct net_device *dev, *tmp;
1327 LIST_HEAD(tmp_list);
1328
1329 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1330 if (!(dev->flags & IFF_UP))
1331 list_move(&dev->unreg_list, &tmp_list);
1332
1333 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001334
Octavian Purdila44345722010-12-13 12:44:07 +00001335 list_for_each_entry(dev, head, unreg_list) {
1336 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1337 call_netdevice_notifiers(NETDEV_DOWN, dev);
1338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339
Octavian Purdila44345722010-12-13 12:44:07 +00001340 /* rollback_registered_many needs the complete original list */
1341 list_splice(&tmp_list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 return 0;
1343}
Patrick McHardybd380812010-02-26 06:34:53 +00001344
1345/**
1346 * dev_close - shutdown an interface.
1347 * @dev: device to shutdown
1348 *
1349 * This function moves an active device into down state. A
1350 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1351 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1352 * chain.
1353 */
1354int dev_close(struct net_device *dev)
1355{
Neil Hormanca99ca12013-02-05 08:05:43 +00001356 int ret = 0;
Eric Dumazete14a5992011-05-10 12:26:06 -07001357 if (dev->flags & IFF_UP) {
1358 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001359
Neil Hormanca99ca12013-02-05 08:05:43 +00001360 /* Block netpoll rx while the interface is going down */
1361 ret = netpoll_rx_disable(dev);
1362 if (ret)
1363 return ret;
1364
Eric Dumazete14a5992011-05-10 12:26:06 -07001365 list_add(&dev->unreg_list, &single);
1366 dev_close_many(&single);
1367 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001368
1369 netpoll_rx_enable(dev);
Eric Dumazete14a5992011-05-10 12:26:06 -07001370 }
Neil Hormanca99ca12013-02-05 08:05:43 +00001371 return ret;
Patrick McHardybd380812010-02-26 06:34:53 +00001372}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001373EXPORT_SYMBOL(dev_close);
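/* Example (illustrative sketch): the matching shutdown path, also under
 * the rtnl lock:
 *
 *	rtnl_lock();
 *	dev_close(dev);
 *	rtnl_unlock();
 */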
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374
1375
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001376/**
1377 * dev_disable_lro - disable Large Receive Offload on a device
1378 * @dev: device
1379 *
1380 * Disable Large Receive Offload (LRO) on a net device. Must be
1381 * called under RTNL. This is needed if received packets may be
1382 * forwarded to another interface.
1383 */
1384void dev_disable_lro(struct net_device *dev)
1385{
Neil Hormanf11970e2011-05-24 08:31:09 +00001386 /*
1387 * If we're trying to disable lro on a vlan device
1388 * use the underlying physical device instead
1389 */
1390 if (is_vlan_dev(dev))
1391 dev = vlan_dev_real_dev(dev);
1392
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001393 dev->wanted_features &= ~NETIF_F_LRO;
1394 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001395
Michał Mirosław22d59692011-04-21 12:42:15 +00001396 if (unlikely(dev->features & NETIF_F_LRO))
1397 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001398}
1399EXPORT_SYMBOL(dev_disable_lro);
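/* Example (illustrative sketch): a forwarding setup, e.g. enslaving a
 * device to a bridge-like master, would disable LRO under RTNL before
 * packets from the device can be forwarded (slave_dev is a hypothetical
 * name):
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(slave_dev);
 */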
1400
1401
Eric W. Biederman881d9662007-09-17 11:56:21 -07001402static int dev_boot_phase = 1;
1403
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404/**
1405 * register_netdevice_notifier - register a network notifier block
1406 * @nb: notifier
1407 *
1408 * Register a notifier to be called when network device events occur.
1409 * The notifier passed is linked into the kernel structures and must
1410 * not be reused until it has been unregistered. A negative errno code
1411 * is returned on a failure.
1412 *
 1413 *	When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001414 *	to the new notifier to give it a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 * view of the network device list.
1416 */
1417
1418int register_netdevice_notifier(struct notifier_block *nb)
1419{
1420 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001421 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001422 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 int err;
1424
1425 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001426 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001427 if (err)
1428 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001429 if (dev_boot_phase)
1430 goto unlock;
1431 for_each_net(net) {
1432 for_each_netdev(net, dev) {
1433 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1434 err = notifier_to_errno(err);
1435 if (err)
1436 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
Eric W. Biederman881d9662007-09-17 11:56:21 -07001438 if (!(dev->flags & IFF_UP))
1439 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001440
Eric W. Biederman881d9662007-09-17 11:56:21 -07001441 nb->notifier_call(nb, NETDEV_UP, dev);
1442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001444
1445unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 rtnl_unlock();
1447 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001448
1449rollback:
1450 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001451 for_each_net(net) {
1452 for_each_netdev(net, dev) {
1453 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001454 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001455
Eric W. Biederman881d9662007-09-17 11:56:21 -07001456 if (dev->flags & IFF_UP) {
1457 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1458 nb->notifier_call(nb, NETDEV_DOWN, dev);
1459 }
1460 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001461 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001462 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001463
RongQing.Li8f891482011-11-30 23:43:07 -05001464outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001465 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001466 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001468EXPORT_SYMBOL(register_netdevice_notifier);
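/* Example (illustrative sketch): a subsystem listening for device
 * events; note that in this scheme the notifier callback receives the
 * struct net_device pointer directly. All "example_" names are
 * hypothetical:
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_netdev_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&example_netdev_nb);
 */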
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469
1470/**
1471 * unregister_netdevice_notifier - unregister a network notifier block
1472 * @nb: notifier
1473 *
1474 * Unregister a notifier previously registered by
 1475 *	register_netdevice_notifier(). The notifier is unlinked from the
1476 * kernel structures and may then be reused. A negative errno code
1477 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001478 *
 1479 *	After unregistering, unregister and down device events are synthesized
1480 * for all devices on the device list to the removed notifier to remove
1481 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 */
1483
1484int unregister_netdevice_notifier(struct notifier_block *nb)
1485{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001486 struct net_device *dev;
1487 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001488 int err;
1489
1490 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001491 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001492 if (err)
1493 goto unlock;
1494
1495 for_each_net(net) {
1496 for_each_netdev(net, dev) {
1497 if (dev->flags & IFF_UP) {
1498 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1499 nb->notifier_call(nb, NETDEV_DOWN, dev);
1500 }
1501 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001502 }
1503 }
1504unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001505 rtnl_unlock();
1506 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001508EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509
1510/**
1511 * call_netdevice_notifiers - call all network notifier blocks
1512 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001513 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 *
1515 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001516 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 */
1518
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001519int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520{
Jiri Pirkoab930472010-04-20 01:45:37 -07001521 ASSERT_RTNL();
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001522 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001524EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525
Ingo Molnarc5905af2012-02-24 08:31:31 +01001526static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001527#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001528/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001529 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001530 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001531 */
1532static atomic_t netstamp_needed_deferred;
1533#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
1535void net_enable_timestamp(void)
1536{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001537#ifdef HAVE_JUMP_LABEL
1538 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1539
1540 if (deferred) {
1541 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001542 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001543 return;
1544 }
1545#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001546 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001548EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
1550void net_disable_timestamp(void)
1551{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001552#ifdef HAVE_JUMP_LABEL
1553 if (in_interrupt()) {
1554 atomic_inc(&netstamp_needed_deferred);
1555 return;
1556 }
1557#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001558 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001560EXPORT_SYMBOL(net_disable_timestamp);
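/* Example (illustrative sketch): users of packet timestamps keep the
 * static key balanced, e.g. a hypothetical caller enabling timestamps
 * for the lifetime of a socket option:
 *
 *	net_enable_timestamp();		(a timestamp option is set)
 *	...
 *	net_disable_timestamp();	(the last user goes away)
 *
 * The deferred counter above exists precisely because the disable side
 * may run in irq context, where static_key_slow_dec() is not allowed.
 */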
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Eric Dumazet3b098e22010-05-15 23:57:10 -07001562static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563{
Eric Dumazet588f0332011-11-15 04:12:55 +00001564 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001565 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001566 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567}
1568
Eric Dumazet588f0332011-11-15 04:12:55 +00001569#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001570 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001571 if ((COND) && !(SKB)->tstamp.tv64) \
1572 __net_timestamp(SKB); \
1573 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001574
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001575static inline bool is_skb_forwardable(struct net_device *dev,
1576 struct sk_buff *skb)
1577{
1578 unsigned int len;
1579
1580 if (!(dev->flags & IFF_UP))
1581 return false;
1582
1583 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1584 if (skb->len <= len)
1585 return true;
1586
1587 /* if TSO is enabled, we don't care about the length as the packet
 1588	 * could be forwarded without being segmented first
1589 */
1590 if (skb_is_gso(skb))
1591 return true;
1592
1593 return false;
1594}
1595
Arnd Bergmann44540962009-11-26 06:07:08 +00001596/**
1597 * dev_forward_skb - loopback an skb to another netif
1598 *
1599 * @dev: destination network device
1600 * @skb: buffer to forward
1601 *
1602 * return values:
1603 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001604 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001605 *
1606 * dev_forward_skb can be used for injecting an skb from the
1607 * start_xmit function of one device into the receive queue
1608 * of another device.
1609 *
1610 * The receiving device may be in another namespace, so
1611 * we have to clear all information in the skb that could
1612 * impact namespace isolation.
1613 */
1614int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1615{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001616 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1617 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1618 atomic_long_inc(&dev->rx_dropped);
1619 kfree_skb(skb);
1620 return NET_RX_DROP;
1621 }
1622 }
1623
Arnd Bergmann44540962009-11-26 06:07:08 +00001624 skb_orphan(skb);
1625
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001626 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001627 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001628 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001629 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001630 }
Benjamin LaHaise3b9785c2012-03-27 15:55:44 +00001631 skb->skb_iif = 0;
David S. Miller59b99972012-05-10 23:03:34 -04001632 skb->dev = dev;
1633 skb_dst_drop(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001634 skb->tstamp.tv64 = 0;
1635 skb->pkt_type = PACKET_HOST;
1636 skb->protocol = eth_type_trans(skb, dev);
David S. Miller59b99972012-05-10 23:03:34 -04001637 skb->mark = 0;
1638 secpath_reset(skb);
1639 nf_reset(skb);
Patrick McHardy124dff02013-04-05 20:42:05 +02001640 nf_reset_trace(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001641 return netif_rx(skb);
1642}
1643EXPORT_SYMBOL_GPL(dev_forward_skb);
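/* Example (illustrative sketch): a veth-style driver could hand frames
 * to its peer from ndo_start_xmit; example_get_peer() and the stats
 * handling are hypothetical:
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */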
1644
Changli Gao71d9dec2010-12-15 19:57:25 +00001645static inline int deliver_skb(struct sk_buff *skb,
1646 struct packet_type *pt_prev,
1647 struct net_device *orig_dev)
1648{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001649 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1650 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001651 atomic_inc(&skb->users);
1652 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1653}
1654
Eric Leblondc0de08d2012-08-16 22:02:58 +00001655static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1656{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001657 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001658 return false;
1659
1660 if (ptype->id_match)
1661 return ptype->id_match(ptype, skb->sk);
1662 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1663 return true;
1664
1665 return false;
1666}
1667
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668/*
1669 * Support routine. Sends outgoing frames to any network
1670 * taps currently in use.
1671 */
1672
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001673static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674{
1675 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001676 struct sk_buff *skb2 = NULL;
1677 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001678
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 rcu_read_lock();
1680 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1681 /* Never send packets back to the socket
1682 * they originated from - MvS (miquels@drinkel.ow.org)
1683 */
1684 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001685 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001686 if (pt_prev) {
1687 deliver_skb(skb2, pt_prev, skb->dev);
1688 pt_prev = ptype;
1689 continue;
1690 }
1691
1692 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 if (!skb2)
1694 break;
1695
Eric Dumazet70978182010-12-20 21:22:51 +00001696 net_timestamp_set(skb2);
1697
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 /* skb->nh should be correctly
 1699			   set by the sender, so that the second statement is
1700 just protection against buggy protocols.
1701 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001702 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001704 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001705 skb2->network_header > skb2->tail) {
Joe Perchese87cc472012-05-13 21:56:26 +00001706 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1707 ntohs(skb2->protocol),
1708 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001709 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 }
1711
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001712 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001714 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 }
1716 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001717 if (pt_prev)
1718 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 rcu_read_unlock();
1720}
1721
Ben Hutchings2c530402012-07-10 10:55:09 +00001722/**
1723 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001724 * @dev: Network device
1725 * @txq: number of queues available
1726 *
 1727 * If real_num_tx_queues is changed, the tc mappings may no longer be
 1728 * valid. To resolve this, verify that each tc mapping remains valid and,
 1729 * if not, zero the mapping. With no priorities mapping to an
 1730 * offset/count pair, it will no longer be used. In the worst case, if
 1731 * TC0 is invalid nothing can be done, so disable priority mappings. It
 1732 * is expected that drivers will fix this mapping if they can before
 1733 * calling netif_set_real_num_tx_queues.
1734 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001735static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001736{
1737 int i;
1738 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1739
1740 /* If TC0 is invalidated disable TC mapping */
1741 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001742 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001743 dev->num_tc = 0;
1744 return;
1745 }
1746
1747 /* Invalidated prio to tc mappings set to TC0 */
1748 for (i = 1; i < TC_BITMASK + 1; i++) {
1749 int q = netdev_get_prio_tc_map(dev, i);
1750
1751 tc = &dev->tc_to_txq[q];
1752 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001753 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1754 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001755 netdev_set_prio_tc_map(dev, i, 0);
1756 }
1757 }
1758}
1759
Alexander Duyck537c00d2013-01-10 08:57:02 +00001760#ifdef CONFIG_XPS
1761static DEFINE_MUTEX(xps_map_mutex);
1762#define xmap_dereference(P) \
1763 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1764
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001765static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1766 int cpu, u16 index)
1767{
1768 struct xps_map *map = NULL;
1769 int pos;
1770
1771 if (dev_maps)
1772 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1773
1774 for (pos = 0; map && pos < map->len; pos++) {
1775 if (map->queues[pos] == index) {
1776 if (map->len > 1) {
1777 map->queues[pos] = map->queues[--map->len];
1778 } else {
1779 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1780 kfree_rcu(map, rcu);
1781 map = NULL;
1782 }
1783 break;
1784 }
1785 }
1786
1787 return map;
1788}
1789
Alexander Duyck024e9672013-01-10 08:57:46 +00001790static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001791{
1792 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001793 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001794 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001795
1796 mutex_lock(&xps_map_mutex);
1797 dev_maps = xmap_dereference(dev->xps_maps);
1798
1799 if (!dev_maps)
1800 goto out_no_maps;
1801
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001802 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001803 for (i = index; i < dev->num_tx_queues; i++) {
1804 if (!remove_xps_queue(dev_maps, cpu, i))
1805 break;
1806 }
1807 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001808 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001809 }
1810
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001811 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001812 RCU_INIT_POINTER(dev->xps_maps, NULL);
1813 kfree_rcu(dev_maps, rcu);
1814 }
1815
Alexander Duyck024e9672013-01-10 08:57:46 +00001816 for (i = index; i < dev->num_tx_queues; i++)
1817 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1818 NUMA_NO_NODE);
1819
Alexander Duyck537c00d2013-01-10 08:57:02 +00001820out_no_maps:
1821 mutex_unlock(&xps_map_mutex);
1822}
1823
Alexander Duyck01c5f862013-01-10 08:57:35 +00001824static struct xps_map *expand_xps_map(struct xps_map *map,
1825 int cpu, u16 index)
1826{
1827 struct xps_map *new_map;
1828 int alloc_len = XPS_MIN_MAP_ALLOC;
1829 int i, pos;
1830
1831 for (pos = 0; map && pos < map->len; pos++) {
1832 if (map->queues[pos] != index)
1833 continue;
1834 return map;
1835 }
1836
1837 /* Need to add queue to this CPU's existing map */
1838 if (map) {
1839 if (pos < map->alloc_len)
1840 return map;
1841
1842 alloc_len = map->alloc_len * 2;
1843 }
1844
1845 /* Need to allocate new map to store queue on this CPU's map */
1846 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1847 cpu_to_node(cpu));
1848 if (!new_map)
1849 return NULL;
1850
1851 for (i = 0; i < pos; i++)
1852 new_map->queues[i] = map->queues[i];
1853 new_map->alloc_len = alloc_len;
1854 new_map->len = pos;
1855
1856 return new_map;
1857}
1858
Alexander Duyck537c00d2013-01-10 08:57:02 +00001859int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
1860{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001861 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001862 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001863 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001864 int cpu, numa_node_id = -2;
1865 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001866
1867 mutex_lock(&xps_map_mutex);
1868
1869 dev_maps = xmap_dereference(dev->xps_maps);
1870
Alexander Duyck01c5f862013-01-10 08:57:35 +00001871 /* allocate memory for queue storage */
1872 for_each_online_cpu(cpu) {
1873 if (!cpumask_test_cpu(cpu, mask))
1874 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001875
Alexander Duyck01c5f862013-01-10 08:57:35 +00001876 if (!new_dev_maps)
1877 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001878 if (!new_dev_maps) {
1879 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001880 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001881 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001882
1883 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1884 NULL;
1885
1886 map = expand_xps_map(map, cpu, index);
1887 if (!map)
1888 goto error;
1889
1890 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1891 }
1892
1893 if (!new_dev_maps)
1894 goto out_no_new_maps;
1895
1896 for_each_possible_cpu(cpu) {
1897 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1898 /* add queue to CPU maps */
1899 int pos = 0;
1900
1901 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1902 while ((pos < map->len) && (map->queues[pos] != index))
1903 pos++;
1904
1905 if (pos == map->len)
1906 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001907#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00001908 if (numa_node_id == -2)
1909 numa_node_id = cpu_to_node(cpu);
1910 else if (numa_node_id != cpu_to_node(cpu))
1911 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001912#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00001913 } else if (dev_maps) {
1914 /* fill in the new device map from the old device map */
1915 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1916 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001917 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001918
Alexander Duyck537c00d2013-01-10 08:57:02 +00001919 }
1920
Alexander Duyck01c5f862013-01-10 08:57:35 +00001921 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1922
Alexander Duyck537c00d2013-01-10 08:57:02 +00001923 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00001924 if (dev_maps) {
1925 for_each_possible_cpu(cpu) {
1926 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1927 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1928 if (map && map != new_map)
1929 kfree_rcu(map, rcu);
1930 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001931
Alexander Duyck537c00d2013-01-10 08:57:02 +00001932 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001933 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001934
Alexander Duyck01c5f862013-01-10 08:57:35 +00001935 dev_maps = new_dev_maps;
1936 active = true;
1937
1938out_no_new_maps:
1939 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00001940 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
1941 (numa_node_id >= 0) ? numa_node_id :
1942 NUMA_NO_NODE);
1943
Alexander Duyck01c5f862013-01-10 08:57:35 +00001944 if (!dev_maps)
1945 goto out_no_maps;
1946
1947 /* removes queue from unused CPUs */
1948 for_each_possible_cpu(cpu) {
1949 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
1950 continue;
1951
1952 if (remove_xps_queue(dev_maps, cpu, index))
1953 active = true;
1954 }
1955
1956 /* free map if not active */
1957 if (!active) {
1958 RCU_INIT_POINTER(dev->xps_maps, NULL);
1959 kfree_rcu(dev_maps, rcu);
1960 }
1961
1962out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00001963 mutex_unlock(&xps_map_mutex);
1964
1965 return 0;
1966error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00001967 /* remove any maps that we added */
1968 for_each_possible_cpu(cpu) {
1969 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1970 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1971 NULL;
1972 if (new_map && new_map != map)
1973 kfree(new_map);
1974 }
1975
Alexander Duyck537c00d2013-01-10 08:57:02 +00001976 mutex_unlock(&xps_map_mutex);
1977
Alexander Duyck537c00d2013-01-10 08:57:02 +00001978 kfree(new_dev_maps);
1979 return -ENOMEM;
1980}
1981EXPORT_SYMBOL(netif_set_xps_queue);
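/* Example (illustrative sketch): a multiqueue driver could pin each TX
 * queue to a CPU; the simple 1:1 queue-to-CPU policy here is
 * hypothetical:
 *
 *	cpumask_var_t mask;
 *	int i;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	for (i = 0; i < dev->real_num_tx_queues; i++) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(i % num_online_cpus(), mask);
 *		netif_set_xps_queue(dev, mask, i);
 *	}
 *	free_cpumask_var(mask);
 */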
1982
1983#endif
John Fastabendf0796d52010-07-01 13:21:57 +00001984/*
1985 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 1986 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1987 */
Tom Herberte6484932010-10-18 18:04:39 +00001988int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00001989{
Tom Herbert1d24eb42010-11-21 13:17:27 +00001990 int rc;
1991
Tom Herberte6484932010-10-18 18:04:39 +00001992 if (txq < 1 || txq > dev->num_tx_queues)
1993 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00001994
Ben Hutchings5c565802011-02-15 19:39:21 +00001995 if (dev->reg_state == NETREG_REGISTERED ||
1996 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00001997 ASSERT_RTNL();
1998
Tom Herbert1d24eb42010-11-21 13:17:27 +00001999 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2000 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002001 if (rc)
2002 return rc;
2003
John Fastabend4f57c082011-01-17 08:06:04 +00002004 if (dev->num_tc)
2005 netif_setup_tc(dev, txq);
2006
Alexander Duyck024e9672013-01-10 08:57:46 +00002007 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002008 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002009#ifdef CONFIG_XPS
2010 netif_reset_xps_queues_gt(dev, txq);
2011#endif
2012 }
John Fastabendf0796d52010-07-01 13:21:57 +00002013 }
Tom Herberte6484932010-10-18 18:04:39 +00002014
2015 dev->real_num_tx_queues = txq;
2016 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002017}
2018EXPORT_SYMBOL(netif_set_real_num_tx_queues);
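/* Example (illustrative sketch): a driver that renegotiated its
 * hardware queue count at runtime shrinks the stack's view under RTNL
 * (hw_txqs is a hypothetical count from the device):
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, hw_txqs);
 *	rtnl_unlock();
 */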
Denis Vlasenko56079432006-03-29 15:57:29 -08002019
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002020#ifdef CONFIG_RPS
2021/**
2022 * netif_set_real_num_rx_queues - set actual number of RX queues used
2023 * @dev: Network device
2024 * @rxq: Actual number of RX queues
2025 *
2026 * This must be called either with the rtnl_lock held or before
2027 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002028 * negative error code. If called before registration, it always
2029 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002030 */
2031int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2032{
2033 int rc;
2034
Tom Herbertbd25fa72010-10-18 18:00:16 +00002035 if (rxq < 1 || rxq > dev->num_rx_queues)
2036 return -EINVAL;
2037
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002038 if (dev->reg_state == NETREG_REGISTERED) {
2039 ASSERT_RTNL();
2040
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002041 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2042 rxq);
2043 if (rc)
2044 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002045 }
2046
2047 dev->real_num_rx_queues = rxq;
2048 return 0;
2049}
2050EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2051#endif
2052
Ben Hutchings2c530402012-07-10 10:55:09 +00002053/**
2054 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002055 *
2056 * This routine should set an upper limit on the number of RSS queues
2057 * used by default by multiqueue devices.
2058 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002059int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002060{
2061 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2062}
2063EXPORT_SYMBOL(netif_get_num_default_rss_queues);
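/* Example (illustrative sketch): a driver clamping its queue count at
 * probe time (hw_max_queues is a hypothetical device limit):
 *
 *	num_queues = min_t(int, hw_max_queues,
 *			   netif_get_num_default_rss_queues());
 */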
2064
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002065static inline void __netif_reschedule(struct Qdisc *q)
2066{
2067 struct softnet_data *sd;
2068 unsigned long flags;
2069
2070 local_irq_save(flags);
2071 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002072 q->next_sched = NULL;
2073 *sd->output_queue_tailp = q;
2074 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002075 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2076 local_irq_restore(flags);
2077}
2078
David S. Miller37437bb2008-07-16 02:15:04 -07002079void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002080{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002081 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2082 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002083}
2084EXPORT_SYMBOL(__netif_schedule);
2085
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002086void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002087{
David S. Miller3578b0c2010-08-03 00:24:04 -07002088 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002089 struct softnet_data *sd;
2090 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002091
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002092 local_irq_save(flags);
2093 sd = &__get_cpu_var(softnet_data);
2094 skb->next = sd->completion_queue;
2095 sd->completion_queue = skb;
2096 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2097 local_irq_restore(flags);
2098 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002099}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002100EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002101
2102void dev_kfree_skb_any(struct sk_buff *skb)
2103{
2104 if (in_irq() || irqs_disabled())
2105 dev_kfree_skb_irq(skb);
2106 else
2107 dev_kfree_skb(skb);
2108}
2109EXPORT_SYMBOL(dev_kfree_skb_any);
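/* Example (illustrative sketch): a TX completion handler that may run
 * in hardirq or process context frees skbs with the _any variant; the
 * ring structure and helper are hypothetical:
 *
 *	static void example_clean_tx(struct example_ring *ring)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = example_next_completed(ring)) != NULL)
 *			dev_kfree_skb_any(skb);
 *	}
 */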
2110
2111
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002112/**
2113 * netif_device_detach - mark device as removed
2114 * @dev: network device
2115 *
 2116 *	Mark the device as removed from the system and therefore no longer available.
2117 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002118void netif_device_detach(struct net_device *dev)
2119{
2120 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2121 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002122 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002123 }
2124}
2125EXPORT_SYMBOL(netif_device_detach);
2126
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002127/**
2128 * netif_device_attach - mark device as attached
2129 * @dev: network device
2130 *
 2131 *	Mark the device as attached to the system and restart it if needed.
2132 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002133void netif_device_attach(struct net_device *dev)
2134{
2135 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2136 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002137 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002138 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002139 }
2140}
2141EXPORT_SYMBOL(netif_device_attach);
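/* Example (illustrative sketch): PCI suspend/resume hooks commonly
 * bracket the hardware state change with detach/attach; the driver
 * names and elided hardware steps are hypothetical:
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		... stop the hardware, save state ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		... restore state, restart the hardware ...
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */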
2142
Ben Hutchings36c92472012-01-17 07:57:56 +00002143static void skb_warn_bad_offload(const struct sk_buff *skb)
2144{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002145 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002146 struct net_device *dev = skb->dev;
2147 const char *driver = "";
2148
Ben Greearc846ad92013-04-19 10:45:52 +00002149 if (!net_ratelimit())
2150 return;
2151
Ben Hutchings36c92472012-01-17 07:57:56 +00002152 if (dev && dev->dev.parent)
2153 driver = dev_driver_string(dev->dev.parent);
2154
2155 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2156 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002157 driver, dev ? &dev->features : &null_features,
2158 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002159 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2160 skb_shinfo(skb)->gso_type, skb->ip_summed);
2161}
2162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163/*
2164 * Invalidate hardware checksum when packet is to be mangled, and
2165 * complete checksum manually on outgoing path.
2166 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002167int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168{
Al Virod3bc23e2006-11-14 21:24:49 -08002169 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002170 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Patrick McHardy84fa7932006-08-29 16:44:56 -07002172 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002173 goto out_set_summed;
2174
2175 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002176 skb_warn_bad_offload(skb);
2177 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 }
2179
Eric Dumazetcef401d2013-01-25 20:34:37 +00002180 /* Before computing a checksum, we should make sure no frag could
2181 * be modified by an external entity : checksum could be wrong.
2182 */
2183 if (skb_has_shared_frag(skb)) {
2184 ret = __skb_linearize(skb);
2185 if (ret)
2186 goto out;
2187 }
2188
Michał Mirosław55508d62010-12-14 15:24:08 +00002189 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002190 BUG_ON(offset >= skb_headlen(skb));
2191 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2192
2193 offset += skb->csum_offset;
2194 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2195
2196 if (skb_cloned(skb) &&
2197 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2199 if (ret)
2200 goto out;
2201 }
2202
Herbert Xua0308472007-10-15 01:47:15 -07002203 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002204out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002206out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 return ret;
2208}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002209EXPORT_SYMBOL(skb_checksum_help);
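/* Example (illustrative sketch): a transmit path that mangled a
 * CHECKSUM_PARTIAL skb which its device cannot checksum falls back to
 * software like this (the drop label is hypothetical):
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */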
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002211__be16 skb_network_protocol(struct sk_buff *skb)
2212{
2213 __be16 type = skb->protocol;
David S. Miller61816592013-03-20 12:46:26 -04002214 int vlan_depth = ETH_HLEN;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002215
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002216 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002217 struct vlan_hdr *vh;
2218
2219 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2220 return 0;
2221
2222 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2223 type = vh->h_vlan_encapsulated_proto;
2224 vlan_depth += VLAN_HLEN;
2225 }
2226
2227 return type;
2228}
2229
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002230/**
2231 * skb_mac_gso_segment - mac layer segmentation handler.
2232 * @skb: buffer to segment
2233 * @features: features for the output path (see dev->features)
2234 */
2235struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2236 netdev_features_t features)
2237{
2238 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2239 struct packet_offload *ptype;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002240 __be16 type = skb_network_protocol(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002241
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002242 if (unlikely(!type))
2243 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002244
2245 __skb_pull(skb, skb->mac_len);
2246
2247 rcu_read_lock();
2248 list_for_each_entry_rcu(ptype, &offload_base, list) {
2249 if (ptype->type == type && ptype->callbacks.gso_segment) {
2250 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2251 int err;
2252
2253 err = ptype->callbacks.gso_send_check(skb);
2254 segs = ERR_PTR(err);
2255 if (err || skb_gso_ok(skb, features))
2256 break;
2257 __skb_push(skb, (skb->data -
2258 skb_network_header(skb)));
2259 }
2260 segs = ptype->callbacks.gso_segment(skb, features);
2261 break;
2262 }
2263 }
2264 rcu_read_unlock();
2265
2266 __skb_push(skb, skb->data - skb_mac_header(skb));
2267
2268 return segs;
2269}
2270EXPORT_SYMBOL(skb_mac_gso_segment);
2271
2272
Cong Wang12b00042013-02-05 16:36:38 +00002273/* openvswitch calls this on the rx path, so we need a different check.
2274 */
2275static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2276{
2277 if (tx_path)
2278 return skb->ip_summed != CHECKSUM_PARTIAL;
2279 else
2280 return skb->ip_summed == CHECKSUM_NONE;
2281}
2282
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002283/**
Cong Wang12b00042013-02-05 16:36:38 +00002284 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002285 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002286 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002287 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002288 *
2289 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002290 *
2291 * It may return NULL if the skb requires no segmentation. This is
2292 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002293 */
Cong Wang12b00042013-02-05 16:36:38 +00002294struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2295 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002296{
Cong Wang12b00042013-02-05 16:36:38 +00002297 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002298 int err;
2299
Ben Hutchings36c92472012-01-17 07:57:56 +00002300 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002301
Herbert Xua430a432006-07-08 13:34:56 -07002302 if (skb_header_cloned(skb) &&
2303 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2304 return ERR_PTR(err);
2305 }
2306
Pravin B Shelar68c33162013-02-14 14:02:41 +00002307 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002308 skb_reset_mac_header(skb);
2309 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002310
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002311 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002312}
Cong Wang12b00042013-02-05 16:36:38 +00002313EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002314
Herbert Xufb286bb2005-11-10 13:01:24 -08002315/* Take action when hardware reception checksum errors are detected. */
2316#ifdef CONFIG_BUG
2317void netdev_rx_csum_fault(struct net_device *dev)
2318{
2319 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002320 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002321 dump_stack();
2322 }
2323}
2324EXPORT_SYMBOL(netdev_rx_csum_fault);
2325#endif
2326
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327/* Actually, we should eliminate this check as soon as we know that:
 2328 * 1. An IOMMU is present and can map all the memory.
2329 * 2. No high memory really exists on this machine.
2330 */
2331
Eric Dumazet9092c652010-04-02 13:34:49 -07002332static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002334#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002336 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002337 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2338 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2339 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002340 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002341 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002342 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002344 if (PCI_DMA_BUS_IS_PHYS) {
2345 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346
Eric Dumazet9092c652010-04-02 13:34:49 -07002347 if (!pdev)
2348 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002349 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002350 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2351 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002352 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2353 return 1;
2354 }
2355 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002356#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 return 0;
2358}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002360struct dev_gso_cb {
2361 void (*destructor)(struct sk_buff *skb);
2362};
2363
2364#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2365
2366static void dev_gso_skb_destructor(struct sk_buff *skb)
2367{
2368 struct dev_gso_cb *cb;
2369
2370 do {
2371 struct sk_buff *nskb = skb->next;
2372
2373 skb->next = nskb->next;
2374 nskb->next = NULL;
2375 kfree_skb(nskb);
2376 } while (skb->next);
2377
2378 cb = DEV_GSO_CB(skb);
2379 if (cb->destructor)
2380 cb->destructor(skb);
2381}
2382
2383/**
2384 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2385 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002386 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002387 *
2388 * This function segments the given skb and stores the list of segments
2389 * in skb->next.
2390 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002391static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002392{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002393 struct sk_buff *segs;
2394
Herbert Xu576a30e2006-06-27 13:22:38 -07002395 segs = skb_gso_segment(skb, features);
2396
2397 /* Verifying header integrity only. */
2398 if (!segs)
2399 return 0;
2400
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002401 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002402 return PTR_ERR(segs);
2403
2404 skb->next = segs;
2405 DEV_GSO_CB(skb)->destructor = skb->destructor;
2406 skb->destructor = dev_gso_skb_destructor;
2407
2408 return 0;
2409}
2410
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002411static netdev_features_t harmonize_features(struct sk_buff *skb,
2412 __be16 protocol, netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002413{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002414 if (skb->ip_summed != CHECKSUM_NONE &&
2415 !can_checksum_protocol(features, protocol)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002416 features &= ~NETIF_F_ALL_CSUM;
Jesse Grossf01a5232011-01-09 06:23:31 +00002417 } else if (illegal_highdma(skb->dev, skb)) {
2418 features &= ~NETIF_F_SG;
2419 }
2420
2421 return features;
2422}
2423
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002424netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002425{
2426 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002427 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002428
Ben Hutchings30b678d2012-07-30 15:57:00 +00002429 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2430 features &= ~NETIF_F_GSO_MASK;
2431
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002432 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
Jesse Gross58e998c2010-10-29 12:14:55 +00002433 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2434 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002435 } else if (!vlan_tx_tag_present(skb)) {
2436 return harmonize_features(skb, protocol, features);
2437 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002438
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002439 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2440 NETIF_F_HW_VLAN_STAG_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002441
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002442 if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002443 return harmonize_features(skb, protocol, features);
2444 } else {
2445 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002446 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2447 NETIF_F_HW_VLAN_STAG_TX;
Jesse Grossf01a5232011-01-09 06:23:31 +00002448 return harmonize_features(skb, protocol, features);
2449 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002450}
Jesse Grossf01a5232011-01-09 06:23:31 +00002451EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002452
John Fastabend6afff0c2010-06-16 14:18:12 +00002453/*
2454 * Returns true if either:
2455 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002456 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002457 */
2458static inline int skb_needs_linearize(struct sk_buff *skb,
Jesse Gross02932ce2011-01-09 06:23:34 +00002459 int features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002460{
Jesse Gross02932ce2011-01-09 06:23:34 +00002461 return skb_is_nonlinear(skb) &&
2462 ((skb_has_frag_list(skb) &&
2463 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002464 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002465 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002466}
2467
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002468int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2469 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002470{
Stephen Hemminger00829822008-11-20 20:14:53 -08002471 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002472 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002473 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002474
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002475 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002476 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002477
Eric Dumazet93f154b2009-05-18 22:19:19 -07002478 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002479		 * If the device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002480		 * it's hot in this cpu's cache
2481 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002482 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2483 skb_dst_drop(skb);
2484
Jesse Grossfc741212011-01-09 06:23:32 +00002485 features = netif_skb_features(skb);
2486
Jesse Gross7b9c6092010-10-20 13:56:04 +00002487 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002488 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2489 skb = __vlan_put_tag(skb, skb->vlan_proto,
2490 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002491 if (unlikely(!skb))
2492 goto out;
2493
2494 skb->vlan_tci = 0;
2495 }
2496
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002497 /* If encapsulation offload request, verify we are testing
2498 * hardware encapsulation features instead of standard
2499 * features for the netdev
2500 */
2501 if (skb->encapsulation)
2502 features &= dev->hw_enc_features;
2503
Jesse Grossfc741212011-01-09 06:23:32 +00002504 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002505 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002506 goto out_kfree_skb;
2507 if (skb->next)
2508 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002509 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002510 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002511 __skb_linearize(skb))
2512 goto out_kfree_skb;
2513
2514 /* If packet is not checksummed and device does not
2515 * support checksumming for this protocol, complete
2516 * checksumming here.
2517 */
2518 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002519 if (skb->encapsulation)
2520 skb_set_inner_transport_header(skb,
2521 skb_checksum_start_offset(skb));
2522 else
2523 skb_set_transport_header(skb,
2524 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002525 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002526 skb_checksum_help(skb))
2527 goto out_kfree_skb;
2528 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002529 }
2530
Eric Dumazetb40863c2012-09-18 20:44:49 +00002531 if (!list_empty(&ptype_all))
2532 dev_queue_xmit_nit(skb, dev);
2533
Koki Sanagiec764bf2011-05-30 21:48:34 +00002534 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002535 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002536 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002537 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002538 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002539 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002540 }
2541
Herbert Xu576a30e2006-06-27 13:22:38 -07002542gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002543 do {
2544 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002545
2546 skb->next = nskb->next;
2547 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002548
Eric Dumazetb40863c2012-09-18 20:44:49 +00002549 if (!list_empty(&ptype_all))
2550 dev_queue_xmit_nit(nskb, dev);
2551
Koki Sanagiec764bf2011-05-30 21:48:34 +00002552 skb_len = nskb->len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002553 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002554 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002555 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002556 if (rc & ~NETDEV_TX_MASK)
2557 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002558 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002559 skb->next = nskb;
2560 return rc;
2561 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002562 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002563 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002564 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002565 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002566
Patrick McHardy572a9d72009-11-10 06:14:14 +00002567out_kfree_gso_skb:
2568 if (likely(skb->next == NULL))
2569 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002570out_kfree_skb:
2571 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002572out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002573 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002574}
2575
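/* A minimal sketch (not from this file) of the ndo_start_xmit() contract
 * that the ops->ndo_start_xmit() calls above rely on: return NETDEV_TX_OK
 * once the skb has been consumed, or NETDEV_TX_BUSY *without* freeing the
 * skb so the core can requeue it. struct example_tx_priv,
 * example_tx_ring_full() and example_tx_post() are hypothetical stand-ins
 * for real driver state.
 */
struct example_tx_priv {
	unsigned int tx_pending;	/* descriptors currently in flight */
	unsigned int tx_ring_size;
};

static bool example_tx_ring_full(const struct example_tx_priv *priv)
{
	return priv->tx_pending >= priv->tx_ring_size;
}

static void example_tx_post(struct example_tx_priv *priv, struct sk_buff *skb)
{
	/* a real driver would map the skb and write a TX descriptor here;
	 * freeing it merely simulates consumption in this sketch */
	dev_kfree_skb_any(skb);
}

static netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct example_tx_priv *priv = netdev_priv(dev);

	if (example_tx_ring_full(priv)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb not consumed; core retries */
	}

	example_tx_post(priv, skb);	/* skb is now owned by the driver */
	return NETDEV_TX_OK;
}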
Eric Dumazet1def9232013-01-10 12:36:42 +00002576static void qdisc_pkt_len_init(struct sk_buff *skb)
2577{
2578 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2579
2580 qdisc_skb_cb(skb)->pkt_len = skb->len;
2581
2582 /* To get a more precise estimate of the bytes sent on the wire,
2583 * we add the header size of every segment to pkt_len
2584 */
2585 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002586 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002587 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002588
Eric Dumazet757b8b12013-01-15 21:14:21 -08002589 /* mac layer + network layer */
2590 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2591
2592 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002593 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2594 hdr_len += tcp_hdrlen(skb);
2595 else
2596 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002597
2598 if (shinfo->gso_type & SKB_GSO_DODGY)
2599 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2600 shinfo->gso_size);
2601
2602 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002603 }
2604}
2605
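/* Worked example (illustrative numbers): a TSO skb with skb->len = 7266
 * and 14-byte Ethernet + 20-byte IPv4 + 32-byte TCP headers has
 * hdr_len = 66, so it carries 7266 - 66 = 7200 bytes of payload. With
 * gso_size = 1448 that is 5 segments, and pkt_len becomes
 * 7266 + (5 - 1) * 66 = 7530, the number of bytes that actually hit
 * the wire (5 headers of 66 bytes plus 7200 bytes of payload).
 */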
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002606static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2607 struct net_device *dev,
2608 struct netdev_queue *txq)
2609{
2610 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002611 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002612 int rc;
2613
Eric Dumazet1def9232013-01-10 12:36:42 +00002614 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002615 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002616 /*
2617 * Heuristic to force contended enqueues to serialize on a
2618 * separate lock before trying to get the qdisc main lock.
2619 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2620 * and dequeue packets faster.
2621 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002622 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002623 if (unlikely(contended))
2624 spin_lock(&q->busylock);
2625
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002626 spin_lock(root_lock);
2627 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2628 kfree_skb(skb);
2629 rc = NET_XMIT_DROP;
2630 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002631 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002632 /*
2633 * This is a work-conserving queue; there are no old skbs
2634 * waiting to be sent out; and the qdisc is not running -
2635 * xmit the skb directly.
2636 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002637 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2638 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002639
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002640 qdisc_bstats_update(q, skb);
2641
Eric Dumazet79640a42010-06-02 05:09:29 -07002642 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2643 if (unlikely(contended)) {
2644 spin_unlock(&q->busylock);
2645 contended = false;
2646 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002647 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002648 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002649 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002650
2651 rc = NET_XMIT_SUCCESS;
2652 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002653 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002654 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002655 if (qdisc_run_begin(q)) {
2656 if (unlikely(contended)) {
2657 spin_unlock(&q->busylock);
2658 contended = false;
2659 }
2660 __qdisc_run(q);
2661 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002662 }
2663 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002664 if (unlikely(contended))
2665 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002666 return rc;
2667}
2668
Neil Horman5bc14212011-11-22 05:10:51 +00002669#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2670static void skb_update_prio(struct sk_buff *skb)
2671{
Igor Maravic6977a792011-11-25 07:44:54 +00002672 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002673
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002674 if (!skb->priority && skb->sk && map) {
2675 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2676
2677 if (prioidx < map->priomap_len)
2678 skb->priority = map->priomap[prioidx];
2679 }
Neil Horman5bc14212011-11-22 05:10:51 +00002680}
2681#else
2682#define skb_update_prio(skb)
2683#endif
2684
Eric Dumazet745e20f2010-09-29 13:23:09 -07002685static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002686#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002687
Dave Jonesd29f7492008-07-22 14:09:06 -07002688/**
Michel Machado95603e22012-06-12 10:16:35 +00002689 * dev_loopback_xmit - loop back @skb
2690 * @skb: buffer to transmit
2691 */
2692int dev_loopback_xmit(struct sk_buff *skb)
2693{
2694 skb_reset_mac_header(skb);
2695 __skb_pull(skb, skb_network_offset(skb));
2696 skb->pkt_type = PACKET_LOOPBACK;
2697 skb->ip_summed = CHECKSUM_UNNECESSARY;
2698 WARN_ON(!skb_dst(skb));
2699 skb_dst_force(skb);
2700 netif_rx_ni(skb);
2701 return 0;
2702}
2703EXPORT_SYMBOL(dev_loopback_xmit);
2704
2705/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002706 * dev_queue_xmit - transmit a buffer
2707 * @skb: buffer to transmit
2708 *
2709 * Queue a buffer for transmission to a network device. The caller must
2710 * have set the device and priority and built the buffer before calling
2711 * this function. The function can be called from an interrupt.
2712 *
2713 * A negative errno code is returned on a failure. A success does not
2714 * guarantee the frame will be transmitted as it may be dropped due
2715 * to congestion or traffic shaping.
2716 *
2717 * -----------------------------------------------------------------------------------
2718 * I notice this method can also return errors from the queue disciplines,
2719 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2720 * be positive.
2721 *
2722 * Regardless of the return value, the skb is consumed, so it is currently
2723 * difficult to retry a failed send through this method. (You can bump
2724 * the ref count before sending to hold a reference for retry if you are careful.)
2725 *
2726 * When calling this method, interrupts MUST be enabled. This is because
2727 * the BH enable code must have IRQs enabled so that it will not deadlock.
2728 * --BLG
2729 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730int dev_queue_xmit(struct sk_buff *skb)
2731{
2732 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002733 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 struct Qdisc *q;
2735 int rc = -ENOMEM;
2736
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002737 skb_reset_mac_header(skb);
2738
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002739 /* Disable soft irqs for various locks below. Also
2740 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002742 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
Neil Horman5bc14212011-11-22 05:10:51 +00002744 skb_update_prio(skb);
2745
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002746 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002747 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002748
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002750 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002752 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002754 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002755 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 }
2757
2758 /* The device has no queue. This is the common case for software
2759 devices: loopback, all sorts of tunnels...
2760
Herbert Xu932ff272006-06-09 12:20:56 -07002761 Really, it is unlikely that netif_tx_lock protection is necessary
2762 here. (f.e. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 counters.)
2764 However, it is possible that they rely on the protection
2765 we provide here.
2766
2767 Check this and take the lock. It is not prone to deadlocks.
2768 Taking the lock for a noqueue qdisc is even simpler 8)
2769 */
2770 if (dev->flags & IFF_UP) {
2771 int cpu = smp_processor_id(); /* ok because BHs are off */
2772
David S. Millerc773e842008-07-08 23:13:53 -07002773 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774
Eric Dumazet745e20f2010-09-29 13:23:09 -07002775 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2776 goto recursion_alert;
2777
David S. Millerc773e842008-07-08 23:13:53 -07002778 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
Tom Herbert734664982011-11-28 16:32:44 +00002780 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002781 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002782 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002783 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002784 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002785 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 goto out;
2787 }
2788 }
David S. Millerc773e842008-07-08 23:13:53 -07002789 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002790 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2791 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 } else {
2793 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002794 * unfortunately
2795 */
2796recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002797 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2798 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 }
2800 }
2801
2802 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002803 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 kfree_skb(skb);
2806 return rc;
2807out:
Herbert Xud4828d82006-06-22 02:28:18 -07002808 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 return rc;
2810}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002811EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812
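/* A minimal sketch of a dev_queue_xmit() caller, assuming the payload is
 * already a complete IPv4 packet; example_send_ipv4() and the dest_mac
 * handling are hypothetical, and a real caller would pick GFP flags to
 * suit its context. Note the skb is consumed whatever the return value.
 */
static int example_send_ipv4(struct net_device *dev, const void *payload,
			     unsigned int len, const u8 *dest_mac)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for link header */
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/* prepend the link-layer header; this can fail for devices
	 * without header_ops */
	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dev_queue_xmit(skb);
}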
2813
2814/*=======================================================================
2815 Receiver routines
2816 =======================================================================*/
2817
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002818int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002819EXPORT_SYMBOL(netdev_max_backlog);
2820
Eric Dumazet3b098e22010-05-15 23:57:10 -07002821int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002822int netdev_budget __read_mostly = 300;
2823int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002825/* Called with irq disabled */
2826static inline void ____napi_schedule(struct softnet_data *sd,
2827 struct napi_struct *napi)
2828{
2829 list_add_tail(&napi->poll_list, &sd->poll_list);
2830 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2831}
2832
Eric Dumazetdf334542010-03-24 19:13:54 +00002833#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002834
2835/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002836struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002837EXPORT_SYMBOL(rps_sock_flow_table);
2838
Ingo Molnarc5905af2012-02-24 08:31:31 +01002839struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002840
Ben Hutchingsc4454772011-01-19 11:03:53 +00002841static struct rps_dev_flow *
2842set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2843 struct rps_dev_flow *rflow, u16 next_cpu)
2844{
Ben Hutchings09994d12011-10-03 04:42:46 +00002845 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002846#ifdef CONFIG_RFS_ACCEL
2847 struct netdev_rx_queue *rxqueue;
2848 struct rps_dev_flow_table *flow_table;
2849 struct rps_dev_flow *old_rflow;
2850 u32 flow_id;
2851 u16 rxq_index;
2852 int rc;
2853
2854 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002855 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2856 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00002857 goto out;
2858 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2859 if (rxq_index == skb_get_rx_queue(skb))
2860 goto out;
2861
2862 rxqueue = dev->_rx + rxq_index;
2863 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2864 if (!flow_table)
2865 goto out;
2866 flow_id = skb->rxhash & flow_table->mask;
2867 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2868 rxq_index, flow_id);
2869 if (rc < 0)
2870 goto out;
2871 old_rflow = rflow;
2872 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00002873 rflow->filter = rc;
2874 if (old_rflow->filter == rflow->filter)
2875 old_rflow->filter = RPS_NO_FILTER;
2876 out:
2877#endif
2878 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00002879 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002880 }
2881
Ben Hutchings09994d12011-10-03 04:42:46 +00002882 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002883 return rflow;
2884}
2885
Tom Herbert0a9627f2010-03-16 08:03:29 +00002886/*
2887 * get_rps_cpu is called from netif_receive_skb and returns the target
2888 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002889 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002890 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002891static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2892 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002893{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002894 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002895 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002896 struct rps_dev_flow_table *flow_table;
2897 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002898 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07002899 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002900
Tom Herbert0a9627f2010-03-16 08:03:29 +00002901 if (skb_rx_queue_recorded(skb)) {
2902 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002903 if (unlikely(index >= dev->real_num_rx_queues)) {
2904 WARN_ONCE(dev->real_num_rx_queues > 1,
2905 "%s received packet on queue %u, but number "
2906 "of RX queues is %u\n",
2907 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002908 goto done;
2909 }
2910 rxqueue = dev->_rx + index;
2911 } else
2912 rxqueue = dev->_rx;
2913
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002914 map = rcu_dereference(rxqueue->rps_map);
2915 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08002916 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00002917 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00002918 tcpu = map->cpus[0];
2919 if (cpu_online(tcpu))
2920 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002921 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00002922 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00002923 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00002924 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002925 }
2926
Changli Gao2d47b452010-08-17 19:00:56 +00002927 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002928 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00002929 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002930
Tom Herbertfec5e652010-04-16 16:01:27 -07002931 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2932 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2933 if (flow_table && sock_flow_table) {
2934 u16 next_cpu;
2935 struct rps_dev_flow *rflow;
2936
2937 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2938 tcpu = rflow->cpu;
2939
2940 next_cpu = sock_flow_table->ents[skb->rxhash &
2941 sock_flow_table->mask];
2942
2943 /*
2944 * If the desired CPU (where last recvmsg was done) is
2945 * different from current CPU (one in the rx-queue flow
2946 * table entry), switch if one of the following holds:
2947 * - Current CPU is unset (equal to RPS_NO_CPU).
2948 * - Current CPU is offline.
2949 * - The current CPU's queue tail has advanced beyond the
2950 * last packet that was enqueued using this table entry.
2951 * This guarantees that all previous packets for the flow
2952 * have been dequeued, thus preserving in order delivery.
2953 */
2954 if (unlikely(tcpu != next_cpu) &&
2955 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2956 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00002957 rflow->last_qtail)) >= 0)) {
2958 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002959 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00002960 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00002961
Tom Herbertfec5e652010-04-16 16:01:27 -07002962 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2963 *rflowp = rflow;
2964 cpu = tcpu;
2965 goto done;
2966 }
2967 }
2968
Tom Herbert0a9627f2010-03-16 08:03:29 +00002969 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07002970 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00002971
2972 if (cpu_online(tcpu)) {
2973 cpu = tcpu;
2974 goto done;
2975 }
2976 }
2977
2978done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00002979 return cpu;
2980}
2981
Ben Hutchingsc4454772011-01-19 11:03:53 +00002982#ifdef CONFIG_RFS_ACCEL
2983
2984/**
2985 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2986 * @dev: Device on which the filter was set
2987 * @rxq_index: RX queue index
2988 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2989 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2990 *
2991 * Drivers that implement ndo_rx_flow_steer() should periodically call
2992 * this function for each installed filter and remove the filters for
2993 * which it returns %true.
2994 */
2995bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2996 u32 flow_id, u16 filter_id)
2997{
2998 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2999 struct rps_dev_flow_table *flow_table;
3000 struct rps_dev_flow *rflow;
3001 bool expire = true;
3002 int cpu;
3003
3004 rcu_read_lock();
3005 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3006 if (flow_table && flow_id <= flow_table->mask) {
3007 rflow = &flow_table->flows[flow_id];
3008 cpu = ACCESS_ONCE(rflow->cpu);
3009 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3010 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3011 rflow->last_qtail) <
3012 (int)(10 * flow_table->mask)))
3013 expire = false;
3014 }
3015 rcu_read_unlock();
3016 return expire;
3017}
3018EXPORT_SYMBOL(rps_may_expire_flow);
3019
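/* A sketch of the periodic scan a driver implementing ndo_rx_flow_steer()
 * might run, per the comment above; struct example_rfs_filter and the
 * hardware-removal step are hypothetical.
 */
struct example_rfs_filter {
	bool in_use;
	u16 rxq_index;	/* RX queue the filter steers to */
	u32 flow_id;	/* as passed to ndo_rx_flow_steer() */
	u16 filter_id;	/* as returned by ndo_rx_flow_steer() */
};

static void example_expire_rfs_filters(struct net_device *dev,
				       struct example_rfs_filter *tbl,
				       unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, tbl[i].filter_id)) {
			/* a real driver would remove its hardware filter here */
			tbl[i].in_use = false;
		}
	}
}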
3020#endif /* CONFIG_RFS_ACCEL */
3021
Tom Herbert0a9627f2010-03-16 08:03:29 +00003022/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003023static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003024{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003025 struct softnet_data *sd = data;
3026
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003027 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003028 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003029}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003030
Tom Herbertfec5e652010-04-16 16:01:27 -07003031#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003032
3033/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003034 * Check whether this softnet_data structure belongs to another CPU.
3035 * If it does, queue it on our IPI list and return 1;
3036 * if not, return 0.
3037 */
3038static int rps_ipi_queued(struct softnet_data *sd)
3039{
3040#ifdef CONFIG_RPS
3041 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3042
3043 if (sd != mysd) {
3044 sd->rps_ipi_next = mysd->rps_ipi_list;
3045 mysd->rps_ipi_list = sd;
3046
3047 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3048 return 1;
3049 }
3050#endif /* CONFIG_RPS */
3051 return 0;
3052}
3053
3054/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003055 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
3056 * queue (which may be a remote CPU's queue).
3057 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003058static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3059 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003060{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003061 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003062 unsigned long flags;
3063
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003064 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003065
3066 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003067
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003068 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003069 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3070 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003071enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003072 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003073 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003074 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003075 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003076 return NET_RX_SUCCESS;
3077 }
3078
Eric Dumazetebda37c22010-05-06 23:51:21 +00003079 /* Schedule NAPI for backlog device
3080 * We can use a non-atomic operation since we own the queue lock
3081 */
3082 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003083 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003084 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003085 }
3086 goto enqueue;
3087 }
3088
Changli Gaodee42872010-05-02 05:42:16 +00003089 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003090 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003091
Tom Herbert0a9627f2010-03-16 08:03:29 +00003092 local_irq_restore(flags);
3093
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003094 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003095 kfree_skb(skb);
3096 return NET_RX_DROP;
3097}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099/**
3100 * netif_rx - post buffer to the network code
3101 * @skb: buffer to post
3102 *
3103 * This function receives a packet from a device driver and queues it for
3104 * the upper (protocol) levels to process. It always succeeds. The buffer
3105 * may be dropped during processing for congestion control or by the
3106 * protocol layers.
3107 *
3108 * return values:
3109 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 * NET_RX_DROP (packet was dropped)
3111 *
3112 */
3113
3114int netif_rx(struct sk_buff *skb)
3115{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003116 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117
3118 /* if netpoll wants it, pretend we never saw it */
3119 if (netpoll_rx(skb))
3120 return NET_RX_DROP;
3121
Eric Dumazet588f0332011-11-15 04:12:55 +00003122 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123
Koki Sanagicf66ba52010-08-23 18:45:02 +09003124 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003125#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003126 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003127 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003128 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129
Changli Gaocece1942010-08-07 20:35:43 -07003130 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003131 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003132
3133 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003134 if (cpu < 0)
3135 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003136
3137 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3138
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003139 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003140 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003141 } else
3142#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003143 {
3144 unsigned int qtail;
3145 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3146 put_cpu();
3147 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003148 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003150EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
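/* A minimal sketch of the classic non-NAPI receive path that ends in
 * netif_rx(); example_rx_one() is hypothetical and would typically be
 * called from a device's interrupt handler with the received bytes.
 */
static void example_rx_one(struct net_device *dev, const void *buf,
			   unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */

	netif_rx(skb);	/* queue for the upper layers; always succeeds */
}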
3152int netif_rx_ni(struct sk_buff *skb)
3153{
3154 int err;
3155
3156 preempt_disable();
3157 err = netif_rx(skb);
3158 if (local_softirq_pending())
3159 do_softirq();
3160 preempt_enable();
3161
3162 return err;
3163}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164EXPORT_SYMBOL(netif_rx_ni);
3165
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166static void net_tx_action(struct softirq_action *h)
3167{
3168 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3169
3170 if (sd->completion_queue) {
3171 struct sk_buff *clist;
3172
3173 local_irq_disable();
3174 clist = sd->completion_queue;
3175 sd->completion_queue = NULL;
3176 local_irq_enable();
3177
3178 while (clist) {
3179 struct sk_buff *skb = clist;
3180 clist = clist->next;
3181
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003182 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003183 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 __kfree_skb(skb);
3185 }
3186 }
3187
3188 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003189 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190
3191 local_irq_disable();
3192 head = sd->output_queue;
3193 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003194 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 local_irq_enable();
3196
3197 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003198 struct Qdisc *q = head;
3199 spinlock_t *root_lock;
3200
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201 head = head->next_sched;
3202
David S. Miller5fb66222008-08-02 20:02:43 -07003203 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003204 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003205 smp_mb__before_clear_bit();
3206 clear_bit(__QDISC_STATE_SCHED,
3207 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003208 qdisc_run(q);
3209 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003211 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003212 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003213 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003214 } else {
3215 smp_mb__before_clear_bit();
3216 clear_bit(__QDISC_STATE_SCHED,
3217 &q->state);
3218 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219 }
3220 }
3221 }
3222}
3223
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003224#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3225 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003226/* This hook is defined here for ATM LANE */
3227int (*br_fdb_test_addr_hook)(struct net_device *dev,
3228 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003229EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003230#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232#ifdef CONFIG_NET_CLS_ACT
3233/* TODO: Maybe we should just force sch_ingress to be compiled in
3234 * whenever CONFIG_NET_CLS_ACT is? Otherwise we pay a few useless
3235 * instructions (a compare and two extra stores) when ingress is not
3236 * built but CONFIG_NET_CLS_ACT is.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003237 * NOTE: This doesn't stop any functionality; if you don't have
3238 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 *
3240 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003241static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003244 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003245 int result = TC_ACT_OK;
3246 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003247
Stephen Hemmingerde384832010-08-01 00:33:23 -07003248 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003249 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3250 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003251 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252 }
3253
Herbert Xuf697c3e2007-10-14 00:38:47 -07003254 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3255 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3256
David S. Miller83874002008-07-17 00:53:03 -07003257 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003258 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003259 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003260 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3261 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003262 spin_unlock(qdisc_lock(q));
3263 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003264
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265 return result;
3266}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003267
3268static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3269 struct packet_type **pt_prev,
3270 int *ret, struct net_device *orig_dev)
3271{
Eric Dumazet24824a02010-10-02 06:11:55 +00003272 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3273
3274 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003275 goto out;
3276
3277 if (*pt_prev) {
3278 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3279 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003280 }
3281
Eric Dumazet24824a02010-10-02 06:11:55 +00003282 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003283 case TC_ACT_SHOT:
3284 case TC_ACT_STOLEN:
3285 kfree_skb(skb);
3286 return NULL;
3287 }
3288
3289out:
3290 skb->tc_verd = 0;
3291 return skb;
3292}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293#endif
3294
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003295/**
3296 * netdev_rx_handler_register - register receive handler
3297 * @dev: device to register a handler for
3298 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003299 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003300 *
3301 * Register a receive handler for a device. This handler will then be
3302 * called from __netif_receive_skb. A negative errno code is returned
3303 * on a failure.
3304 *
3305 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003306 *
3307 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003308 */
3309int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003310 rx_handler_func_t *rx_handler,
3311 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003312{
3313 ASSERT_RTNL();
3314
3315 if (dev->rx_handler)
3316 return -EBUSY;
3317
Eric Dumazet00cfec32013-03-29 03:01:22 +00003318 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003319 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003320 rcu_assign_pointer(dev->rx_handler, rx_handler);
3321
3322 return 0;
3323}
3324EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3325
3326/**
3327 * netdev_rx_handler_unregister - unregister receive handler
3328 * @dev: device to unregister a handler from
3329 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003330 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003331 *
3332 * The caller must hold the rtnl_mutex.
3333 */
3334void netdev_rx_handler_unregister(struct net_device *dev)
3335{
3336
3337 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003338 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003339 /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
3340 * section is guaranteed to see a non-NULL rx_handler_data
3341 * as well.
3342 */
3343 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003344 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003345}
3346EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3347
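/* A sketch of how a module might use the rx_handler hooks above; the
 * example_* names are hypothetical. The handler runs for every frame
 * received on the device, and here it only counts frames before letting
 * normal delivery continue.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	atomic_long_t *counter = rcu_dereference(skb->dev->rx_handler_data);

	atomic_long_inc(counter);
	return RX_HANDLER_PASS;		/* continue normal processing */
}

static int example_attach_rx_handler(struct net_device *dev,
				     atomic_long_t *counter)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_rx_handler, counter);
	rtnl_unlock();
	return err;
}

static void example_detach_rx_handler(struct net_device *dev)
{
	rtnl_lock();
	netdev_rx_handler_unregister(dev);
	rtnl_unlock();
}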
Mel Gormanb4b9e352012-07-31 16:44:26 -07003348/*
3349 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3350 * the special handling of PFMEMALLOC skbs.
3351 */
3352static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3353{
3354 switch (skb->protocol) {
3355 case __constant_htons(ETH_P_ARP):
3356 case __constant_htons(ETH_P_IP):
3357 case __constant_htons(ETH_P_IPV6):
3358 case __constant_htons(ETH_P_8021Q):
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003359 case __constant_htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003360 return true;
3361 default:
3362 return false;
3363 }
3364}
3365
David S. Miller9754e292013-02-14 15:57:38 -05003366static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367{
3368 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003369 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003370 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003371 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003372 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08003374 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375
Eric Dumazet588f0332011-11-15 04:12:55 +00003376 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003377
Koki Sanagicf66ba52010-08-23 18:45:02 +09003378 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003379
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003381 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003382 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003384 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003385
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003386 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003387 if (!skb_transport_header_was_set(skb))
3388 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003389 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
3391 pt_prev = NULL;
3392
3393 rcu_read_lock();
3394
David S. Miller63d8ea72011-02-28 10:48:59 -08003395another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003396 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003397
3398 __this_cpu_inc(softnet_data.processed);
3399
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003400 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3401 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003402 skb = vlan_untag(skb);
3403 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003404 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003405 }
3406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407#ifdef CONFIG_NET_CLS_ACT
3408 if (skb->tc_verd & TC_NCLS) {
3409 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3410 goto ncls;
3411 }
3412#endif
3413
David S. Miller9754e292013-02-14 15:57:38 -05003414 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003415 goto skip_taps;
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003418 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003419 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003420 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 pt_prev = ptype;
3422 }
3423 }
3424
Mel Gormanb4b9e352012-07-31 16:44:26 -07003425skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003427 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3428 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003429 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430ncls:
3431#endif
3432
David S. Miller9754e292013-02-14 15:57:38 -05003433 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003434 goto drop;
3435
John Fastabend24257172011-10-10 09:16:41 +00003436 if (vlan_tx_tag_present(skb)) {
3437 if (pt_prev) {
3438 ret = deliver_skb(skb, pt_prev, orig_dev);
3439 pt_prev = NULL;
3440 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003441 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003442 goto another_round;
3443 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003444 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003445 }
3446
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003447 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003448 if (rx_handler) {
3449 if (pt_prev) {
3450 ret = deliver_skb(skb, pt_prev, orig_dev);
3451 pt_prev = NULL;
3452 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003453 switch (rx_handler(&skb)) {
3454 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003455 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003456 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003457 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003458 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003459 case RX_HANDLER_EXACT:
3460 deliver_exact = true;
3461 case RX_HANDLER_PASS:
3462 break;
3463 default:
3464 BUG();
3465 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003466 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003468 if (vlan_tx_nonzero_tag_present(skb))
3469 skb->pkt_type = PACKET_OTHERHOST;
3470
David S. Miller63d8ea72011-02-28 10:48:59 -08003471 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003472 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003473
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003475 list_for_each_entry_rcu(ptype,
3476 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003477 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003478 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3479 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003480 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003481 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482 pt_prev = ptype;
3483 }
3484 }
3485
3486 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003487 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003488 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003489 else
3490 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003492drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003493 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494 kfree_skb(skb);
3495 /* Jamal, now you will not be able to escape explaining
3496 * to me how you were going to use this. :-)
3497 */
3498 ret = NET_RX_DROP;
3499 }
3500
Mel Gormanb4b9e352012-07-31 16:44:26 -07003501unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003503out:
David S. Miller9754e292013-02-14 15:57:38 -05003504 return ret;
3505}
3506
3507static int __netif_receive_skb(struct sk_buff *skb)
3508{
3509 int ret;
3510
3511 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3512 unsigned long pflags = current->flags;
3513
3514 /*
3515 * PFMEMALLOC skbs are special, they should
3516 * - be delivered to SOCK_MEMALLOC sockets only
3517 * - stay away from userspace
3518 * - have bounded memory usage
3519 *
3520 * Use PF_MEMALLOC as this saves us from propagating the allocation
3521 * context down to all allocation sites.
3522 */
3523 current->flags |= PF_MEMALLOC;
3524 ret = __netif_receive_skb_core(skb, true);
3525 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3526 } else
3527 ret = __netif_receive_skb_core(skb, false);
3528
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 return ret;
3530}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003531
3532/**
3533 * netif_receive_skb - process receive buffer from network
3534 * @skb: buffer to process
3535 *
3536 * netif_receive_skb() is the main receive data processing function.
3537 * It always succeeds. The buffer may be dropped during processing
3538 * for congestion control or by the protocol layers.
3539 *
3540 * This function may only be called from softirq context and interrupts
3541 * should be enabled.
3542 *
3543 * Return values (usually ignored):
3544 * NET_RX_SUCCESS: no congestion
3545 * NET_RX_DROP: packet was dropped
3546 */
3547int netif_receive_skb(struct sk_buff *skb)
3548{
Eric Dumazet588f0332011-11-15 04:12:55 +00003549 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003550
Richard Cochranc1f19b52010-07-17 08:49:36 +00003551 if (skb_defer_rx_timestamp(skb))
3552 return NET_RX_SUCCESS;
3553
Eric Dumazetdf334542010-03-24 19:13:54 +00003554#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003555 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003556 struct rps_dev_flow voidflow, *rflow = &voidflow;
3557 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003558
Eric Dumazet3b098e22010-05-15 23:57:10 -07003559 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003560
Eric Dumazet3b098e22010-05-15 23:57:10 -07003561 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003562
Eric Dumazet3b098e22010-05-15 23:57:10 -07003563 if (cpu >= 0) {
3564 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3565 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003566 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003567 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003568 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003569 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003570#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003571 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003572}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003573EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574
Eric Dumazet88751272010-04-19 05:07:33 +00003575/* Network device is going away, flush any packets still pending
3576 * Called with irqs disabled.
3577 */
Changli Gao152102c2010-03-30 20:16:22 +00003578static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003579{
Changli Gao152102c2010-03-30 20:16:22 +00003580 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003581 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003582 struct sk_buff *skb, *tmp;
3583
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003584 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003585 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003586 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003587 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003588 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003589 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003590 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003591 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003592 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003593
3594 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3595 if (skb->dev == dev) {
3596 __skb_unlink(skb, &sd->process_queue);
3597 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003598 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003599 }
3600 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003601}
3602
Herbert Xud565b0a2008-12-15 23:38:52 -08003603static int napi_gro_complete(struct sk_buff *skb)
3604{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003605 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003606 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003607 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003608 int err = -ENOENT;
3609
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003610 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3611
Herbert Xufc59f9a2009-04-14 15:11:06 -07003612 if (NAPI_GRO_CB(skb)->count == 1) {
3613 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003614 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003615 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003616
3617 rcu_read_lock();
3618 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003619 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003620 continue;
3621
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003622 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003623 break;
3624 }
3625 rcu_read_unlock();
3626
3627 if (err) {
3628 WARN_ON(&ptype->list == head);
3629 kfree_skb(skb);
3630 return NET_RX_SUCCESS;
3631 }
3632
3633out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003634 return netif_receive_skb(skb);
3635}
3636
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003637/* napi->gro_list contains packets ordered by age, with the
3638 * youngest packets at its head.
3639 * Complete skbs in reverse order to reduce latencies.
3640 */
3641void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003642{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003643 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003644
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003645 /* scan list and build reverse chain */
3646 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3647 skb->prev = prev;
3648 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003649 }
3650
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003651 for (skb = prev; skb; skb = prev) {
3652 skb->next = NULL;
3653
3654 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3655 return;
3656
3657 prev = skb->prev;
3658 napi_gro_complete(skb);
3659 napi->gro_count--;
3660 }
3661
Herbert Xud565b0a2008-12-15 23:38:52 -08003662 napi->gro_list = NULL;
3663}
Eric Dumazet86cac582010-08-31 18:25:32 +00003664EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003665
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003666static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3667{
3668 struct sk_buff *p;
3669 unsigned int maclen = skb->dev->hard_header_len;
3670
3671 for (p = napi->gro_list; p; p = p->next) {
3672 unsigned long diffs;
3673
3674 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3675 diffs |= p->vlan_tci ^ skb->vlan_tci;
3676 if (maclen == ETH_HLEN)
3677 diffs |= compare_ether_header(skb_mac_header(p),
3678 skb_gro_mac_header(skb));
3679 else if (!diffs)
3680 diffs = memcmp(skb_mac_header(p),
3681 skb_gro_mac_header(skb),
3682 maclen);
3683 NAPI_GRO_CB(p)->same_flow = !diffs;
3684 NAPI_GRO_CB(p)->flush = 0;
3685 }
3686}
3687
Rami Rosenbb728822012-11-28 21:55:25 +00003688static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003689{
3690 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003691 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003692 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003693 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003694 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003695 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003696
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003697 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003698 goto normal;
3699
David S. Miller21dc3302010-08-23 00:13:46 -07003700 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003701 goto normal;
3702
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003703 gro_list_prepare(napi, skb);
3704
Herbert Xud565b0a2008-12-15 23:38:52 -08003705 rcu_read_lock();
3706 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003707 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003708 continue;
3709
Herbert Xu86911732009-01-29 14:19:50 +00003710 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00003711 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003712 NAPI_GRO_CB(skb)->same_flow = 0;
3713 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003714 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003715
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003716 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003717 break;
3718 }
3719 rcu_read_unlock();
3720
3721 if (&ptype->list == head)
3722 goto normal;
3723
Herbert Xu0da2afd52008-12-26 14:57:42 -08003724 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003725 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003726
Herbert Xud565b0a2008-12-15 23:38:52 -08003727 if (pp) {
3728 struct sk_buff *nskb = *pp;
3729
3730 *pp = nskb->next;
3731 nskb->next = NULL;
3732 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003733 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003734 }
3735
Herbert Xu0da2afd52008-12-26 14:57:42 -08003736 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003737 goto ok;
3738
Herbert Xu4ae55442009-02-08 18:00:36 +00003739 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003740 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003741
Herbert Xu4ae55442009-02-08 18:00:36 +00003742 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003743 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003744 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003745 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003746 skb->next = napi->gro_list;
3747 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003748 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003749
Herbert Xuad0f9902009-02-01 01:24:55 -08003750pull:
Herbert Xucb189782009-05-26 18:50:31 +00003751 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3752 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3753
3754 BUG_ON(skb->end - skb->tail < grow);
3755
3756 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3757
3758 skb->tail += grow;
3759 skb->data_len -= grow;
3760
3761 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003762 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003763
Eric Dumazet9e903e02011-10-18 21:00:24 +00003764 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003765 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003766 memmove(skb_shinfo(skb)->frags,
3767 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003768 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003769 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003770 }
3771
Herbert Xud565b0a2008-12-15 23:38:52 -08003772ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003773 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003774
3775normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003776 ret = GRO_NORMAL;
3777 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003778}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003779
Herbert Xu96e93ea2009-01-06 10:49:34 -08003780
Rami Rosenbb728822012-11-28 21:55:25 +00003781static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003782{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003783 switch (ret) {
3784 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003785 if (netif_receive_skb(skb))
3786 ret = GRO_DROP;
3787 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003788
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003789 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003790 kfree_skb(skb);
3791 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003792
Eric Dumazetdaa86542012-04-19 07:07:40 +00003793 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003794 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3795 kmem_cache_free(skbuff_head_cache, skb);
3796 else
3797 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003798 break;
3799
Ben Hutchings5b252f02009-10-29 07:17:09 +00003800 case GRO_HELD:
3801 case GRO_MERGED:
3802 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003803 }
3804
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003805 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003806}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003807
Eric Dumazetca07e432012-10-06 22:28:06 +00003808static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003809{
Eric Dumazetca07e432012-10-06 22:28:06 +00003810 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3811 const skb_frag_t *frag0 = &pinfo->frags[0];
3812
Herbert Xu78a478d2009-05-26 18:50:21 +00003813 NAPI_GRO_CB(skb)->data_offset = 0;
3814 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003815 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003816
Herbert Xu78d3fd02009-05-26 18:50:23 +00003817 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003818 pinfo->nr_frags &&
3819 !PageHighMem(skb_frag_page(frag0))) {
3820 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3821 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003822 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003823}
Herbert Xu78a478d2009-05-26 18:50:21 +00003824
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003825gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003826{
Herbert Xu86911732009-01-29 14:19:50 +00003827 skb_gro_reset_offset(skb);
3828
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003829 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003830}
3831EXPORT_SYMBOL(napi_gro_receive);
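
/*
 * Example (editorial sketch, not part of the original file, kept under
 * #if 0 so it stays illustrative): a minimal driver ->poll() handler
 * feeding received frames into GRO via napi_gro_receive().  The
 * foo_ring structure and foo_ring_next_skb() are hypothetical; the
 * NAPI and skb calls are the real APIs defined in or used by this file.
 */
#if 0
struct foo_ring {
	struct napi_struct napi;
	struct net_device *netdev;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = foo_ring_next_skb(ring); /* hypothetical */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, ring->netdev);
		napi_gro_receive(napi, skb); /* merge into gro_list or deliver */
		work_done++;
	}
	if (work_done < budget)
		napi_complete(napi); /* real drivers also re-enable RX irqs here */
	return work_done;
}
#endif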
3832
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003833static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003834{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003835 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003836 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3837 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003838 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003839 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003840 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003841
3842 napi->skb = skb;
3843}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003844
Herbert Xu76620aa2009-04-16 02:02:07 -07003845struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003846{
Herbert Xu5d38a072009-01-04 16:13:40 -08003847 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003848
3849 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003850 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3851 if (skb)
3852 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003853 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003854 return skb;
3855}
Herbert Xu76620aa2009-04-16 02:02:07 -07003856EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003857
Rami Rosenbb728822012-11-28 21:55:25 +00003858static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003859 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003860{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003861 switch (ret) {
3862 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003863 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003864 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003865
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003866 if (ret == GRO_HELD)
3867 skb_gro_pull(skb, -ETH_HLEN);
3868 else if (netif_receive_skb(skb))
3869 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003870 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003871
3872 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003873 case GRO_MERGED_FREE:
3874 napi_reuse_skb(napi, skb);
3875 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003876
3877 case GRO_MERGED:
3878 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003879 }
3880
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003881 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003882}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003883
Eric Dumazet4adb9c42012-05-18 20:49:06 +00003884static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003885{
Herbert Xu76620aa2009-04-16 02:02:07 -07003886 struct sk_buff *skb = napi->skb;
3887 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00003888 unsigned int hlen;
3889 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07003890
3891 napi->skb = NULL;
3892
3893 skb_reset_mac_header(skb);
3894 skb_gro_reset_offset(skb);
3895
Herbert Xua5b1cf22009-05-26 18:50:28 +00003896 off = skb_gro_offset(skb);
3897 hlen = off + sizeof(*eth);
3898 eth = skb_gro_header_fast(skb, off);
3899 if (skb_gro_header_hard(skb, hlen)) {
3900 eth = skb_gro_header_slow(skb, hlen, off);
3901 if (unlikely(!eth)) {
3902 napi_reuse_skb(napi, skb);
3903 skb = NULL;
3904 goto out;
3905 }
Herbert Xu76620aa2009-04-16 02:02:07 -07003906 }
3907
3908 skb_gro_pull(skb, sizeof(*eth));
3909
3910 /*
3911 * This works because the only protocols we care about don't require
3912 * special handling. We'll fix it up properly at the end.
3913 */
3914 skb->protocol = eth->h_proto;
3915
3916out:
3917 return skb;
3918}
Herbert Xu76620aa2009-04-16 02:02:07 -07003919
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003920gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07003921{
3922 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003923
3924 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003925 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003926
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003927 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08003928}
3929EXPORT_SYMBOL(napi_gro_frags);
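
/*
 * Example (editorial sketch, not part of the original file): drivers
 * whose hardware delivers into pages rather than linear buffers use the
 * napi_get_frags()/napi_gro_frags() pair instead of napi_gro_receive().
 * foo_rx_frag() and its parameters are hypothetical; note that
 * napi_gro_frags() consumes napi->skb on every path.
 */
#if 0
static void foo_rx_frag(struct napi_struct *napi, struct page *page,
			unsigned int off, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return; /* allocation failed: the frame is dropped */

	skb_fill_page_desc(skb, 0, page, off, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi); /* parses the Ethernet header itself */
}
#endif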
3930
Eric Dumazete326bed2010-04-22 00:22:45 -07003931/*
3932 * net_rps_action sends any pending IPIs for rps.
3933 * Note: called with local irq disabled, but exits with local irq enabled.
3934 */
3935static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3936{
3937#ifdef CONFIG_RPS
3938 struct softnet_data *remsd = sd->rps_ipi_list;
3939
3940 if (remsd) {
3941 sd->rps_ipi_list = NULL;
3942
3943 local_irq_enable();
3944
3945		/* Send pending IPIs to kick RPS processing on remote cpus. */
3946 while (remsd) {
3947 struct softnet_data *next = remsd->rps_ipi_next;
3948
3949 if (cpu_online(remsd->cpu))
3950 __smp_call_function_single(remsd->cpu,
3951 &remsd->csd, 0);
3952 remsd = next;
3953 }
3954 } else
3955#endif
3956 local_irq_enable();
3957}
3958
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003959static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960{
3961 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003962 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963
Eric Dumazete326bed2010-04-22 00:22:45 -07003964#ifdef CONFIG_RPS
3965	/* Check if we have pending IPIs; it's better to send them now
3966	 * rather than waiting for net_rx_action() to end.
3967 */
3968 if (sd->rps_ipi_list) {
3969 local_irq_disable();
3970 net_rps_action_and_irq_enable(sd);
3971 }
3972#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003973 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07003974 local_irq_disable();
3975 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07003977 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978
Changli Gao6e7676c2010-04-27 15:07:33 -07003979 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07003980 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07003981 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07003982 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00003983 input_queue_head_incr(sd);
3984 if (++work >= quota) {
3985 local_irq_enable();
3986 return work;
3987 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003988 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989
Changli Gao6e7676c2010-04-27 15:07:33 -07003990 rps_lock(sd);
3991 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003992 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07003993 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3994 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003995
Changli Gao6e7676c2010-04-27 15:07:33 -07003996 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003997 /*
3998 * Inline a custom version of __napi_complete().
3999	 * Only the current cpu owns and manipulates this napi,
4000	 * and NAPI_STATE_SCHED is the only possible flag set on backlog;
4001	 * we can use a plain write instead of clear_bit(),
4002	 * and we don't need an smp_mb() memory barrier.
4003 */
4004 list_del(&napi->poll_list);
4005 napi->state = 0;
4006
Changli Gao6e7676c2010-04-27 15:07:33 -07004007 quota = work + qlen;
4008 }
4009 rps_unlock(sd);
4010 }
4011 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004012
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004013 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014}
4015
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004016/**
4017 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004018 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004019 *
4020 * The entry's receive function will be scheduled to run
4021 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004022void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004023{
4024 unsigned long flags;
4025
4026 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004027 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004028 local_irq_restore(flags);
4029}
4030EXPORT_SYMBOL(__napi_schedule);
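
/*
 * Example (editorial sketch, not part of the original file): the usual
 * interrupt-side pairing for __napi_schedule().  Drivers normally use
 * napi_schedule(), whose napi_schedule_prep() test is made explicit
 * here to show the NAPI_STATE_SCHED transition.  foo_mask_rx_irq() is
 * hypothetical.
 */
#if 0
static irqreturn_t foo_intr(int irq, void *dev_id)
{
	struct foo_ring *ring = dev_id; /* foo_ring as in the sketch above */

	if (napi_schedule_prep(&ring->napi)) {
		foo_mask_rx_irq(ring); /* hypothetical: quiesce device IRQs */
		__napi_schedule(&ring->napi);
	}
	return IRQ_HANDLED;
}
#endif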
4031
Herbert Xud565b0a2008-12-15 23:38:52 -08004032void __napi_complete(struct napi_struct *n)
4033{
4034 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4035 BUG_ON(n->gro_list);
4036
4037 list_del(&n->poll_list);
4038 smp_mb__before_clear_bit();
4039 clear_bit(NAPI_STATE_SCHED, &n->state);
4040}
4041EXPORT_SYMBOL(__napi_complete);
4042
4043void napi_complete(struct napi_struct *n)
4044{
4045 unsigned long flags;
4046
4047 /*
4048 * don't let napi dequeue from the cpu poll list
4049	 * just in case it's running on a different cpu
4050 */
4051 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4052 return;
4053
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004054 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004055 local_irq_save(flags);
4056 __napi_complete(n);
4057 local_irq_restore(flags);
4058}
4059EXPORT_SYMBOL(napi_complete);
4060
4061void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4062 int (*poll)(struct napi_struct *, int), int weight)
4063{
4064 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004065 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004066 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004067 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004068 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004069 if (weight > NAPI_POLL_WEIGHT)
4070 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4071 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004072 napi->weight = weight;
4073 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004074 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004075#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004076 spin_lock_init(&napi->poll_lock);
4077 napi->poll_owner = -1;
4078#endif
4079 set_bit(NAPI_STATE_SCHED, &napi->state);
4080}
4081EXPORT_SYMBOL(netif_napi_add);
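
/*
 * Example (editorial sketch, not part of the original file): typical
 * registration order at probe time.  netif_napi_add() must run before
 * the device is registered; 64 is the conventional weight, and
 * foo_poll/foo_setup are the hypothetical helpers sketched earlier.
 */
#if 0
static int foo_setup(struct net_device *dev, struct foo_ring *ring)
{
	ring->netdev = dev;
	netif_napi_add(dev, &ring->napi, foo_poll, 64);
	return register_netdevice(dev); /* caller holds RTNL */
}
#endif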
4082
4083void netif_napi_del(struct napi_struct *napi)
4084{
4085 struct sk_buff *skb, *next;
4086
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004087 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004088 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004089
4090 for (skb = napi->gro_list; skb; skb = next) {
4091 next = skb->next;
4092 skb->next = NULL;
4093 kfree_skb(skb);
4094 }
4095
4096 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004097 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004098}
4099EXPORT_SYMBOL(netif_napi_del);
4100
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101static void net_rx_action(struct softirq_action *h)
4102{
Eric Dumazete326bed2010-04-22 00:22:45 -07004103 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004104 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004105 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004106 void *have;
4107
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108 local_irq_disable();
4109
Eric Dumazete326bed2010-04-22 00:22:45 -07004110 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004111 struct napi_struct *n;
4112 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004114		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004115		 * Allow this to run for 2 jiffies, which allows
 4116		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004117 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004118 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119 goto softnet_break;
4120
4121 local_irq_enable();
4122
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004123 /* Even though interrupts have been re-enabled, this
4124 * access is safe because interrupts can only add new
4125 * entries to the tail of this list, and only ->poll()
4126 * calls can remove this head entry from the list.
4127 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004128 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004130 have = netpoll_poll_lock(n);
4131
4132 weight = n->weight;
4133
David S. Miller0a7606c2007-10-29 21:28:47 -07004134 /* This NAPI_STATE_SCHED test is for avoiding a race
4135 * with netpoll's poll_napi(). Only the entity which
4136 * obtains the lock and sees NAPI_STATE_SCHED set will
4137 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004138 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004139 */
4140 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004141 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004142 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004143 trace_napi_poll(n);
4144 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004145
4146 WARN_ON_ONCE(work > weight);
4147
4148 budget -= work;
4149
4150 local_irq_disable();
4151
4152 /* Drivers must not modify the NAPI state if they
4153 * consume the entire weight. In such cases this code
4154 * still "owns" the NAPI instance and therefore can
4155 * move the instance around on the list at-will.
4156 */
David S. Millerfed17f32008-01-07 21:00:40 -08004157 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004158 if (unlikely(napi_disable_pending(n))) {
4159 local_irq_enable();
4160 napi_complete(n);
4161 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004162 } else {
4163 if (n->gro_list) {
4164 /* flush too old packets
4165 * If HZ < 1000, flush all packets.
4166 */
4167 local_irq_enable();
4168 napi_gro_flush(n, HZ >= 1000);
4169 local_irq_disable();
4170 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004171 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004172 }
David S. Millerfed17f32008-01-07 21:00:40 -08004173 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004174
4175 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176 }
4177out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004178 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004179
Chris Leechdb217332006-06-17 21:24:58 -07004180#ifdef CONFIG_NET_DMA
4181 /*
4182 * There may not be any more sk_buffs coming right now, so push
4183 * any pending DMA copies to hardware
4184 */
Dan Williams2ba05622009-01-06 11:38:14 -07004185 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004186#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004187
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 return;
4189
4190softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004191 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4193 goto out;
4194}
4195
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004196struct netdev_upper {
4197 struct net_device *dev;
4198 bool master;
4199 struct list_head list;
4200 struct rcu_head rcu;
4201 struct list_head search_list;
4202};
4203
4204static void __append_search_uppers(struct list_head *search_list,
4205 struct net_device *dev)
4206{
4207 struct netdev_upper *upper;
4208
4209 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4210 /* check if this upper is not already in search list */
4211 if (list_empty(&upper->search_list))
4212 list_add_tail(&upper->search_list, search_list);
4213 }
4214}
4215
4216static bool __netdev_search_upper_dev(struct net_device *dev,
4217 struct net_device *upper_dev)
4218{
4219 LIST_HEAD(search_list);
4220 struct netdev_upper *upper;
4221 struct netdev_upper *tmp;
4222 bool ret = false;
4223
4224 __append_search_uppers(&search_list, dev);
4225 list_for_each_entry(upper, &search_list, search_list) {
4226 if (upper->dev == upper_dev) {
4227 ret = true;
4228 break;
4229 }
4230 __append_search_uppers(&search_list, upper->dev);
4231 }
4232 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4233 INIT_LIST_HEAD(&upper->search_list);
4234 return ret;
4235}
4236
4237static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4238 struct net_device *upper_dev)
4239{
4240 struct netdev_upper *upper;
4241
4242 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4243 if (upper->dev == upper_dev)
4244 return upper;
4245 }
4246 return NULL;
4247}
4248
4249/**
4250 * netdev_has_upper_dev - Check if device is linked to an upper device
4251 * @dev: device
4252 * @upper_dev: upper device to check
4253 *
4254 * Find out if a device is linked to specified upper device and return true
 4255 * in case it is. Note that this checks only the immediate upper device,
 4256 * not the complete stack of devices. The caller must hold the RTNL lock.
4257 */
4258bool netdev_has_upper_dev(struct net_device *dev,
4259 struct net_device *upper_dev)
4260{
4261 ASSERT_RTNL();
4262
4263 return __netdev_find_upper(dev, upper_dev);
4264}
4265EXPORT_SYMBOL(netdev_has_upper_dev);
4266
4267/**
4268 * netdev_has_any_upper_dev - Check if device is linked to some device
4269 * @dev: device
4270 *
4271 * Find out if a device is linked to an upper device and return true in case
4272 * it is. The caller must hold the RTNL lock.
4273 */
4274bool netdev_has_any_upper_dev(struct net_device *dev)
4275{
4276 ASSERT_RTNL();
4277
4278 return !list_empty(&dev->upper_dev_list);
4279}
4280EXPORT_SYMBOL(netdev_has_any_upper_dev);
4281
4282/**
4283 * netdev_master_upper_dev_get - Get master upper device
4284 * @dev: device
4285 *
4286 * Find a master upper device and return pointer to it or NULL in case
4287 * it's not there. The caller must hold the RTNL lock.
4288 */
4289struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4290{
4291 struct netdev_upper *upper;
4292
4293 ASSERT_RTNL();
4294
4295 if (list_empty(&dev->upper_dev_list))
4296 return NULL;
4297
4298 upper = list_first_entry(&dev->upper_dev_list,
4299 struct netdev_upper, list);
4300 if (likely(upper->master))
4301 return upper->dev;
4302 return NULL;
4303}
4304EXPORT_SYMBOL(netdev_master_upper_dev_get);
4305
4306/**
4307 * netdev_master_upper_dev_get_rcu - Get master upper device
4308 * @dev: device
4309 *
4310 * Find a master upper device and return pointer to it or NULL in case
4311 * it's not there. The caller must hold the RCU read lock.
4312 */
4313struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4314{
4315 struct netdev_upper *upper;
4316
4317 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4318 struct netdev_upper, list);
4319 if (upper && likely(upper->master))
4320 return upper->dev;
4321 return NULL;
4322}
4323EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4324
4325static int __netdev_upper_dev_link(struct net_device *dev,
4326 struct net_device *upper_dev, bool master)
4327{
4328 struct netdev_upper *upper;
4329
4330 ASSERT_RTNL();
4331
4332 if (dev == upper_dev)
4333 return -EBUSY;
4334
4335 /* To prevent loops, check if dev is not upper device to upper_dev. */
4336 if (__netdev_search_upper_dev(upper_dev, dev))
4337 return -EBUSY;
4338
4339 if (__netdev_find_upper(dev, upper_dev))
4340 return -EEXIST;
4341
4342 if (master && netdev_master_upper_dev_get(dev))
4343 return -EBUSY;
4344
4345 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4346 if (!upper)
4347 return -ENOMEM;
4348
4349 upper->dev = upper_dev;
4350 upper->master = master;
4351 INIT_LIST_HEAD(&upper->search_list);
4352
4353 /* Ensure that master upper link is always the first item in list. */
4354 if (master)
4355 list_add_rcu(&upper->list, &dev->upper_dev_list);
4356 else
4357 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4358 dev_hold(upper_dev);
4359
4360 return 0;
4361}
4362
4363/**
4364 * netdev_upper_dev_link - Add a link to the upper device
4365 * @dev: device
4366 * @upper_dev: new upper device
4367 *
 4368 * Adds a link to a device which is upper to this one. The caller must hold
4369 * the RTNL lock. On a failure a negative errno code is returned.
4370 * On success the reference counts are adjusted and the function
4371 * returns zero.
4372 */
4373int netdev_upper_dev_link(struct net_device *dev,
4374 struct net_device *upper_dev)
4375{
4376 return __netdev_upper_dev_link(dev, upper_dev, false);
4377}
4378EXPORT_SYMBOL(netdev_upper_dev_link);
4379
4380/**
4381 * netdev_master_upper_dev_link - Add a master link to the upper device
4382 * @dev: device
4383 * @upper_dev: new upper device
4384 *
 4385 * Adds a link to a device which is upper to this one. In this case, only
4386 * one master upper device can be linked, although other non-master devices
4387 * might be linked as well. The caller must hold the RTNL lock.
4388 * On a failure a negative errno code is returned. On success the reference
4389 * counts are adjusted and the function returns zero.
4390 */
4391int netdev_master_upper_dev_link(struct net_device *dev,
4392 struct net_device *upper_dev)
4393{
4394 return __netdev_upper_dev_link(dev, upper_dev, true);
4395}
4396EXPORT_SYMBOL(netdev_master_upper_dev_link);
4397
4398/**
4399 * netdev_upper_dev_unlink - Removes a link to upper device
4400 * @dev: device
 4401 * @upper_dev: upper device to remove the link to
4402 *
4403 * Removes a link to device which is upper to this one. The caller must hold
4404 * the RTNL lock.
4405 */
4406void netdev_upper_dev_unlink(struct net_device *dev,
4407 struct net_device *upper_dev)
4408{
4409 struct netdev_upper *upper;
4410
4411 ASSERT_RTNL();
4412
4413 upper = __netdev_find_upper(dev, upper_dev);
4414 if (!upper)
4415 return;
4416 list_del_rcu(&upper->list);
4417 dev_put(upper_dev);
4418 kfree_rcu(upper, rcu);
4419}
4420EXPORT_SYMBOL(netdev_upper_dev_unlink);
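
/*
 * Example (editorial sketch, not part of the original file): how a
 * bonding-style master might use the upper-device API above.  Note the
 * argument order: the lower (slave) device comes first.  foo_enslave()
 * and foo_release() are hypothetical.
 */
#if 0
static int foo_enslave(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	/* fails with -EBUSY on loops, -EEXIST if already linked */
	return netdev_master_upper_dev_link(slave, master);
}

static void foo_release(struct net_device *master, struct net_device *slave)
{
	netdev_upper_dev_unlink(slave, master);
}
#endif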
4421
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004422static void dev_change_rx_flags(struct net_device *dev, int flags)
4423{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004424 const struct net_device_ops *ops = dev->netdev_ops;
4425
4426 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4427 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004428}
4429
Wang Chendad9b332008-06-18 01:48:28 -07004430static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07004431{
Eric Dumazetb536db92011-11-30 21:42:26 +00004432 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004433 kuid_t uid;
4434 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07004435
Patrick McHardy24023452007-07-14 18:51:31 -07004436 ASSERT_RTNL();
4437
Wang Chendad9b332008-06-18 01:48:28 -07004438 dev->flags |= IFF_PROMISC;
4439 dev->promiscuity += inc;
4440 if (dev->promiscuity == 0) {
4441 /*
4442 * Avoid overflow.
4443 * If inc causes overflow, untouch promisc and return error.
4444 */
4445 if (inc < 0)
4446 dev->flags &= ~IFF_PROMISC;
4447 else {
4448 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004449 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4450 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004451 return -EOVERFLOW;
4452 }
4453 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004454 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004455 pr_info("device %s %s promiscuous mode\n",
4456 dev->name,
4457 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11004458 if (audit_enabled) {
4459 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004460 audit_log(current->audit_context, GFP_ATOMIC,
4461 AUDIT_ANOM_PROMISCUOUS,
4462 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4463 dev->name, (dev->flags & IFF_PROMISC),
4464 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07004465 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004466 from_kuid(&init_user_ns, uid),
4467 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004468 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11004469 }
Patrick McHardy24023452007-07-14 18:51:31 -07004470
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004471 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07004472 }
Wang Chendad9b332008-06-18 01:48:28 -07004473 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004474}
4475
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476/**
4477 * dev_set_promiscuity - update promiscuity count on a device
4478 * @dev: device
4479 * @inc: modifier
4480 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07004481 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482 * remains above zero the interface remains promiscuous. Once it hits zero
 4483 * the device reverts to normal filtering operation. A negative inc
4484 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07004485 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486 */
Wang Chendad9b332008-06-18 01:48:28 -07004487int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488{
Eric Dumazetb536db92011-11-30 21:42:26 +00004489 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07004490 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491
Wang Chendad9b332008-06-18 01:48:28 -07004492 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07004493 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07004494 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07004495 if (dev->flags != old_flags)
4496 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07004497 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004499EXPORT_SYMBOL(dev_set_promiscuity);
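
/*
 * Example (editorial sketch, not part of the original file):
 * promiscuity is reference counted, so every +1 must eventually be
 * balanced by a -1; dev_set_allmulti() below follows the same counting
 * discipline.  foo_attach_tap()/foo_detach_tap() are hypothetical.
 */
#if 0
static int foo_attach_tap(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1); /* rtnl held by caller */
}

static void foo_detach_tap(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);
}
#endif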
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500
4501/**
4502 * dev_set_allmulti - update allmulti count on a device
4503 * @dev: device
4504 * @inc: modifier
4505 *
4506 * Add or remove reception of all multicast frames to a device. While the
4507 * count in the device remains above zero the interface remains listening
 4508 * to all multicast frames. Once it hits zero the device reverts to normal
4509 * filtering operation. A negative @inc value is used to drop the counter
4510 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07004511 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512 */
4513
Wang Chendad9b332008-06-18 01:48:28 -07004514int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515{
Eric Dumazetb536db92011-11-30 21:42:26 +00004516 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517
Patrick McHardy24023452007-07-14 18:51:31 -07004518 ASSERT_RTNL();
4519
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07004521 dev->allmulti += inc;
4522 if (dev->allmulti == 0) {
4523 /*
4524 * Avoid overflow.
4525 * If inc causes overflow, untouch allmulti and return error.
4526 */
4527 if (inc < 0)
4528 dev->flags &= ~IFF_ALLMULTI;
4529 else {
4530 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004531 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4532 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004533 return -EOVERFLOW;
4534 }
4535 }
Patrick McHardy24023452007-07-14 18:51:31 -07004536 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004537 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07004538 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07004539 }
Wang Chendad9b332008-06-18 01:48:28 -07004540 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004541}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004542EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07004543
4544/*
4545 * Upload unicast and multicast address lists to device and
4546 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08004547 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07004548 * are present.
4549 */
4550void __dev_set_rx_mode(struct net_device *dev)
4551{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004552 const struct net_device_ops *ops = dev->netdev_ops;
4553
Patrick McHardy4417da62007-06-27 01:28:10 -07004554 /* dev_open will call this function so the list will stay sane. */
4555 if (!(dev->flags&IFF_UP))
4556 return;
4557
4558 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09004559 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07004560
Jiri Pirko01789342011-08-16 06:29:00 +00004561 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004562	/* Unicast address changes may only happen under the rtnl,
4563 * therefore calling __dev_set_promiscuity here is safe.
4564 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004565 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004566 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07004567 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004568 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004569 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07004570 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07004571 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004572 }
Jiri Pirko01789342011-08-16 06:29:00 +00004573
4574 if (ops->ndo_set_rx_mode)
4575 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004576}
4577
4578void dev_set_rx_mode(struct net_device *dev)
4579{
David S. Millerb9e40852008-07-15 00:15:08 -07004580 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004581 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07004582 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583}
4584
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004585/**
4586 * dev_get_flags - get flags reported to userspace
4587 * @dev: device
4588 *
4589 * Get the combination of flag bits exported through APIs to userspace.
4590 */
Eric Dumazet95c96172012-04-15 05:58:06 +00004591unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592{
Eric Dumazet95c96172012-04-15 05:58:06 +00004593 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594
4595 flags = (dev->flags & ~(IFF_PROMISC |
4596 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004597 IFF_RUNNING |
4598 IFF_LOWER_UP |
4599 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004600 (dev->gflags & (IFF_PROMISC |
4601 IFF_ALLMULTI));
4602
Stefan Rompfb00055a2006-03-20 17:09:11 -08004603 if (netif_running(dev)) {
4604 if (netif_oper_up(dev))
4605 flags |= IFF_RUNNING;
4606 if (netif_carrier_ok(dev))
4607 flags |= IFF_LOWER_UP;
4608 if (netif_dormant(dev))
4609 flags |= IFF_DORMANT;
4610 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611
4612 return flags;
4613}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004614EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
Patrick McHardybd380812010-02-26 06:34:53 +00004616int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617{
Eric Dumazetb536db92011-11-30 21:42:26 +00004618 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004619 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620
Patrick McHardy24023452007-07-14 18:51:31 -07004621 ASSERT_RTNL();
4622
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 /*
4624 * Set the flags on our device.
4625 */
4626
4627 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4628 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4629 IFF_AUTOMEDIA)) |
4630 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4631 IFF_ALLMULTI));
4632
4633 /*
4634 * Load in the correct multicast list now the flags have changed.
4635 */
4636
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004637 if ((old_flags ^ flags) & IFF_MULTICAST)
4638 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004639
Patrick McHardy4417da62007-06-27 01:28:10 -07004640 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641
4642 /*
 4643 * Have we downed the interface? We handle IFF_UP ourselves
4644 * according to user attempts to set it, rather than blindly
4645 * setting it.
4646 */
4647
4648 ret = 0;
4649 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004650 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651
4652 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004653 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654 }
4655
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004657 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4658
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659 dev->gflags ^= IFF_PROMISC;
4660 dev_set_promiscuity(dev, inc);
4661 }
4662
4663 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4664	   is important. Some (broken) drivers set IFF_PROMISC when
 4665	   IFF_ALLMULTI is requested, without asking us and without reporting.
4666 */
4667 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004668 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4669
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 dev->gflags ^= IFF_ALLMULTI;
4671 dev_set_allmulti(dev, inc);
4672 }
4673
Patrick McHardybd380812010-02-26 06:34:53 +00004674 return ret;
4675}
4676
4677void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4678{
4679 unsigned int changes = dev->flags ^ old_flags;
4680
4681 if (changes & IFF_UP) {
4682 if (dev->flags & IFF_UP)
4683 call_netdevice_notifiers(NETDEV_UP, dev);
4684 else
4685 call_netdevice_notifiers(NETDEV_DOWN, dev);
4686 }
4687
4688 if (dev->flags & IFF_UP &&
4689 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4690 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4691}
4692
4693/**
4694 * dev_change_flags - change device settings
4695 * @dev: device
4696 * @flags: device state flags
4697 *
4698 * Change settings on device based state flags. The flags are
4699 * in the userspace exported format.
4700 */
Eric Dumazetb536db92011-11-30 21:42:26 +00004701int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00004702{
Eric Dumazetb536db92011-11-30 21:42:26 +00004703 int ret;
4704 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004705
4706 ret = __dev_change_flags(dev, flags);
4707 if (ret < 0)
4708 return ret;
4709
4710 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004711 if (changes)
4712 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004713
Patrick McHardybd380812010-02-26 06:34:53 +00004714 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715 return ret;
4716}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004717EXPORT_SYMBOL(dev_change_flags);
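
/*
 * Example (editorial sketch, not part of the original file): forcing an
 * interface up from inside the kernel by toggling IFF_UP through
 * dev_get_flags()/dev_change_flags(), much as the SIOCSIFFLAGS ioctl
 * path does.  foo_force_up() is hypothetical; RTNL must be held.
 */
#if 0
static int foo_force_up(struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	ASSERT_RTNL();
	return dev_change_flags(dev, flags | IFF_UP);
}
#endif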
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004719/**
4720 * dev_set_mtu - Change maximum transfer unit
4721 * @dev: device
4722 * @new_mtu: new transfer unit
4723 *
4724 * Change the maximum transfer size of the network device.
4725 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726int dev_set_mtu(struct net_device *dev, int new_mtu)
4727{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004728 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729 int err;
4730
4731 if (new_mtu == dev->mtu)
4732 return 0;
4733
4734 /* MTU must be positive. */
4735 if (new_mtu < 0)
4736 return -EINVAL;
4737
4738 if (!netif_device_present(dev))
4739 return -ENODEV;
4740
4741 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004742 if (ops->ndo_change_mtu)
4743 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744 else
4745 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004746
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00004747 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004748 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749 return err;
4750}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004751EXPORT_SYMBOL(dev_set_mtu);
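
/*
 * Example (editorial sketch, not part of the original file): a stacked
 * device propagating its MTU to a lower device.  dev_set_mtu() already
 * emits NETDEV_CHANGEMTU on success, so no extra notification is
 * needed.  foo_sync_mtu() is hypothetical.
 */
#if 0
static int foo_sync_mtu(struct net_device *upper, struct net_device *lower)
{
	ASSERT_RTNL();
	return dev_set_mtu(lower, upper->mtu);
}
#endif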
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004753/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00004754 * dev_set_group - Change group this device belongs to
4755 * @dev: device
4756 * @new_group: group this device should belong to
4757 */
4758void dev_set_group(struct net_device *dev, int new_group)
4759{
4760 dev->group = new_group;
4761}
4762EXPORT_SYMBOL(dev_set_group);
4763
4764/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004765 * dev_set_mac_address - Change Media Access Control Address
4766 * @dev: device
4767 * @sa: new address
4768 *
4769 * Change the hardware (MAC) address of the device
4770 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4772{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004773 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004774 int err;
4775
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004776 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777 return -EOPNOTSUPP;
4778 if (sa->sa_family != dev->type)
4779 return -EINVAL;
4780 if (!netif_device_present(dev))
4781 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004782 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00004783 if (err)
4784 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00004785 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00004786 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04004787 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00004788 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004790EXPORT_SYMBOL(dev_set_mac_address);
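
/*
 * Example (editorial sketch, not part of the original file): cloning
 * one device's MAC onto another through the sockaddr-based API above.
 * foo_clone_mac() is hypothetical; sa_family must match dev->type or
 * dev_set_mac_address() returns -EINVAL.
 */
#if 0
static int foo_clone_mac(struct net_device *dst, const struct net_device *src)
{
	struct sockaddr sa;

	sa.sa_family = dst->type;
	memcpy(sa.sa_data, src->dev_addr, dst->addr_len);
	return dev_set_mac_address(dst, &sa); /* rtnl held by caller */
}
#endif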
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004792/**
4793 * dev_change_carrier - Change device carrier
4794 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00004795 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004796 *
4797 * Change device carrier
4798 */
4799int dev_change_carrier(struct net_device *dev, bool new_carrier)
4800{
4801 const struct net_device_ops *ops = dev->netdev_ops;
4802
4803 if (!ops->ndo_change_carrier)
4804 return -EOPNOTSUPP;
4805 if (!netif_device_present(dev))
4806 return -ENODEV;
4807 return ops->ndo_change_carrier(dev, new_carrier);
4808}
4809EXPORT_SYMBOL(dev_change_carrier);
4810
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811/**
4812 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004813 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814 *
4815 * Returns a suitable unique value for a new device interface
4816 * number. The caller must hold the rtnl semaphore or the
4817 * dev_base_lock to be sure it remains unique.
4818 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004819static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004820{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004821 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822 for (;;) {
4823 if (++ifindex <= 0)
4824 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004825 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004826 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827 }
4828}
4829
Linus Torvalds1da177e2005-04-16 15:20:36 -07004830/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004831static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004833static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004835 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836}
4837
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004838static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004839{
Krishna Kumare93737b2009-12-08 22:26:02 +00004840 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004841
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004842 BUG_ON(dev_boot_phase);
4843 ASSERT_RTNL();
4844
Krishna Kumare93737b2009-12-08 22:26:02 +00004845 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004846 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00004847 * for initialization unwind. Remove those
4848 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004849 */
4850 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004851 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
4852 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004853
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004854 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004855 list_del(&dev->unreg_list);
4856 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004857 }
Eric Dumazet449f4542011-05-19 12:24:16 +00004858 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004859 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00004860 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004861
Octavian Purdila44345722010-12-13 12:44:07 +00004862 /* If device is running, close it first. */
4863 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004864
Octavian Purdila44345722010-12-13 12:44:07 +00004865 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004866 /* And unlink it from device chain. */
4867 unlist_netdevice(dev);
4868
4869 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004870 }
4871
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004872 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004873
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004874 list_for_each_entry(dev, head, unreg_list) {
4875 /* Shutdown queueing discipline. */
4876 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004877
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004878
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004879		/* Notify protocols that we are about to destroy
4880 this device. They should clean all the things.
4881 */
4882 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4883
Patrick McHardya2835762010-02-26 06:34:51 +00004884 if (!dev->rtnl_link_ops ||
4885 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4886 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4887
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004888 /*
4889 * Flush the unicast and multicast chains
4890 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00004891 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004892 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004893
4894 if (dev->netdev_ops->ndo_uninit)
4895 dev->netdev_ops->ndo_uninit(dev);
4896
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004897 /* Notifier chain MUST detach us all upper devices. */
4898 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004899
4900 /* Remove entries from kobject tree */
4901 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00004902#ifdef CONFIG_XPS
4903 /* Remove XPS queueing entries */
4904 netif_reset_xps_queues_gt(dev, 0);
4905#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004906 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004907
Eric W. Biederman850a5452011-10-13 22:25:23 +00004908 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004909
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004910 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004911 dev_put(dev);
4912}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004913
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004914static void rollback_registered(struct net_device *dev)
4915{
4916 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004917
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004918 list_add(&dev->unreg_list, &single);
4919 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00004920 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004921}
4922
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004923static netdev_features_t netdev_fix_features(struct net_device *dev,
4924 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07004925{
Michał Mirosław57422dc2011-01-22 12:14:12 +00004926 /* Fix illegal checksum combinations */
4927 if ((features & NETIF_F_HW_CSUM) &&
4928 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004929 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00004930 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4931 }
4932
Herbert Xub63365a2008-10-23 01:11:29 -07004933 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00004934 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004935 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00004936 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07004937 }
4938
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004939 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
4940 !(features & NETIF_F_IP_CSUM)) {
4941 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
4942 features &= ~NETIF_F_TSO;
4943 features &= ~NETIF_F_TSO_ECN;
4944 }
4945
4946 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
4947 !(features & NETIF_F_IPV6_CSUM)) {
4948 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
4949 features &= ~NETIF_F_TSO6;
4950 }
4951
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00004952 /* TSO ECN requires that TSO is present as well. */
4953 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
4954 features &= ~NETIF_F_TSO_ECN;
4955
Michał Mirosław212b5732011-02-15 16:59:16 +00004956 /* Software GSO depends on SG. */
4957 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004958 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00004959 features &= ~NETIF_F_GSO;
4960 }
4961
Michał Mirosławacd11302011-01-24 15:45:15 -08004962 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07004963 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00004964 /* maybe split UFO into V4 and V6? */
4965 if (!((features & NETIF_F_GEN_CSUM) ||
4966 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
4967 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004968 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08004969 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07004970 features &= ~NETIF_F_UFO;
4971 }
4972
4973 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004974 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08004975 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07004976 features &= ~NETIF_F_UFO;
4977 }
4978 }
4979
4980 return features;
4981}
Herbert Xub63365a2008-10-23 01:11:29 -07004982
Michał Mirosław6cb6a272011-04-02 22:48:47 -07004983int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00004984{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004985 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00004986 int err = 0;
4987
Michał Mirosław87267482011-04-12 09:56:38 +00004988 ASSERT_RTNL();
4989
Michał Mirosław5455c692011-02-15 16:59:17 +00004990 features = netdev_get_wanted_features(dev);
4991
4992 if (dev->netdev_ops->ndo_fix_features)
4993 features = dev->netdev_ops->ndo_fix_features(dev, features);
4994
4995 /* driver might be less strict about feature dependencies */
4996 features = netdev_fix_features(dev, features);
4997
4998 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07004999 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005000
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005001 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5002 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005003
5004 if (dev->netdev_ops->ndo_set_features)
5005 err = dev->netdev_ops->ndo_set_features(dev, features);
5006
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005007 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005008 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005009 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5010 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005011 return -1;
5012 }
5013
5014 if (!err)
5015 dev->features = features;
5016
5017 return 1;
5018}
5019
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005020/**
5021 * netdev_update_features - recalculate device features
5022 * @dev: the device to check
5023 *
5024 * Recalculate dev->features set and send notifications if it
5025 * has changed. Should be called after driver or hardware dependent
5026 * conditions might have changed that influence the features.
5027 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005028void netdev_update_features(struct net_device *dev)
5029{
5030 if (__netdev_update_features(dev))
5031 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005032}
5033EXPORT_SYMBOL(netdev_update_features);
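
/*
 * Example (editorial sketch, not part of the original file): a driver
 * recomputing its feature set after a hardware reconfiguration, e.g.
 * from an ethtool callback.  __netdev_update_features() asserts RTNL,
 * hence the locking.  foo_after_reset() is hypothetical.
 */
#if 0
static void foo_after_reset(struct net_device *dev)
{
	rtnl_lock();
	netdev_update_features(dev); /* re-runs the ndo_fix_features chain */
	rtnl_unlock();
}
#endif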
5034
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005036 * netdev_change_features - recalculate device features
5037 * @dev: the device to check
5038 *
5039 * Recalculate dev->features set and send notifications even
5040 * if they have not changed. Should be called instead of
5041 * netdev_update_features() if also dev->vlan_features might
5042 * have changed to allow the changes to be propagated to stacked
5043 * VLAN devices.
5044 */
5045void netdev_change_features(struct net_device *dev)
5046{
5047 __netdev_update_features(dev);
5048 netdev_features_change(dev);
5049}
5050EXPORT_SYMBOL(netdev_change_features);
5051
5052/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005053 * netif_stacked_transfer_operstate - transfer operstate
5054 * @rootdev: the root or lower level device to transfer state from
5055 * @dev: the device to transfer operstate to
5056 *
5057 * Transfer operational state from root to device. This is normally
5058 * called when a stacking relationship exists between the root
 5059 * device and the device (a leaf device).
5060 */
5061void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5062 struct net_device *dev)
5063{
5064 if (rootdev->operstate == IF_OPER_DORMANT)
5065 netif_dormant_on(dev);
5066 else
5067 netif_dormant_off(dev);
5068
5069 if (netif_carrier_ok(rootdev)) {
5070 if (!netif_carrier_ok(dev))
5071 netif_carrier_on(dev);
5072 } else {
5073 if (netif_carrier_ok(dev))
5074 netif_carrier_off(dev);
5075 }
5076}
5077EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5078
Tom Herbertbf264142010-11-26 08:36:09 +00005079#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005080static int netif_alloc_rx_queues(struct net_device *dev)
5081{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005082 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005083 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005084
Tom Herbertbd25fa72010-10-18 18:00:16 +00005085 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005086
Tom Herbertbd25fa72010-10-18 18:00:16 +00005087 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005088 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005089 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005090
Tom Herbertbd25fa72010-10-18 18:00:16 +00005091 dev->_rx = rx;
5092
Tom Herbertbd25fa72010-10-18 18:00:16 +00005093 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005094 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005095 return 0;
5096}
Tom Herbertbf264142010-11-26 08:36:09 +00005097#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005098
Changli Gaoaa942102010-12-04 02:31:41 +00005099static void netdev_init_one_queue(struct net_device *dev,
5100 struct netdev_queue *queue, void *_unused)
5101{
5102 /* Initialize queue lock */
5103 spin_lock_init(&queue->_xmit_lock);
5104 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5105 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005106 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005107 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005108#ifdef CONFIG_BQL
5109 dql_init(&queue->dql, HZ);
5110#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005111}
5112
Tom Herberte6484932010-10-18 18:04:39 +00005113static int netif_alloc_netdev_queues(struct net_device *dev)
5114{
5115 unsigned int count = dev->num_tx_queues;
5116 struct netdev_queue *tx;
5117
5118 BUG_ON(count < 1);
5119
5120 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005121 if (!tx)
Tom Herberte6484932010-10-18 18:04:39 +00005122 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005123
Tom Herberte6484932010-10-18 18:04:39 +00005124 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005125
Tom Herberte6484932010-10-18 18:04:39 +00005126 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5127 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005128
5129 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005130}
5131
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005132/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 * register_netdevice - register a network device
5134 * @dev: device to register
5135 *
5136 * Take a completed network device structure and add it to the kernel
5137 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5138 * chain. 0 is returned on success. A negative errno code is returned
5139 * on a failure to set up the device, or if the name is a duplicate.
5140 *
5141 * Callers must hold the rtnl semaphore. You may want
5142 * register_netdev() instead of this.
5143 *
5144 * BUGS:
5145 * The locking appears insufficient to guarantee two parallel registers
5146 * will not get the same name.
5147 */
5148
5149int register_netdevice(struct net_device *dev)
5150{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005152 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153
5154 BUG_ON(dev_boot_phase);
5155 ASSERT_RTNL();
5156
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005157 might_sleep();
5158
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159	/* When net_devices are persistent, this will be fatal. */
5160 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005161 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162
David S. Millerf1f28aa2008-07-15 00:08:33 -07005163 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005164 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166 dev->iflink = -1;
5167
Gao feng828de4f2012-09-13 20:58:27 +00005168 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005169 if (ret < 0)
5170 goto out;
5171
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005173 if (dev->netdev_ops->ndo_init) {
5174 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175 if (ret) {
5176 if (ret > 0)
5177 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005178 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005179 }
5180 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005181
Patrick McHardyf6469682013-04-19 02:04:27 +00005182 if (((dev->hw_features | dev->features) &
5183 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005184 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5185 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5186 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5187 ret = -EINVAL;
5188 goto err_uninit;
5189 }
5190
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005191 ret = -EBUSY;
5192 if (!dev->ifindex)
5193 dev->ifindex = dev_new_index(net);
5194 else if (__dev_get_by_index(net, dev->ifindex))
5195 goto err_uninit;
5196
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197 if (dev->iflink == -1)
5198 dev->iflink = dev->ifindex;
5199
Michał Mirosław5455c692011-02-15 16:59:17 +00005200 /* Transfer changeable features to wanted_features and enable
5201 * software offloads (GSO and GRO).
5202 */
5203 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005204 dev->features |= NETIF_F_SOFT_FEATURES;
5205 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005207	/* Turn on the no-cache-copy feature if hardware does checksumming */
Michał Mirosław34324dc2011-11-15 15:29:55 +00005208 if (!(dev->flags & IFF_LOOPBACK)) {
5209 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5210 if (dev->features & NETIF_F_ALL_CSUM) {
5211 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5212 dev->features |= NETIF_F_NOCACHE_COPY;
5213 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005214 }
5215
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005216 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005217 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005218 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005219
Pravin B Shelaree579672013-03-07 09:28:08 +00005220 /* Make NETIF_F_SG inheritable to tunnel devices.
5221 */
5222 dev->hw_enc_features |= NETIF_F_SG;
5223
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005224 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5225 ret = notifier_to_errno(ret);
5226 if (ret)
5227 goto err_uninit;
5228
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005229 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005230 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005231 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005232 dev->reg_state = NETREG_REGISTERED;
5233
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005234 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005235
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236 /*
5237 * Default initial state at registry is that the
5238 * device is present.
5239 */
5240
5241 set_bit(__LINK_STATE_PRESENT, &dev->state);
5242
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005243 linkwatch_init_dev(dev);
5244
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005247 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005248 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249
Jiri Pirko948b3372013-01-08 01:38:25 +00005250	/* If the device has a permanent device address, the driver should
5251	 * set dev_addr, and addr_assign_type should be left at
5252	 * NET_ADDR_PERM (the default value).
5253	 */
5254 if (dev->addr_assign_type == NET_ADDR_PERM)
5255 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5256
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005258 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005259 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005260 if (ret) {
5261 rollback_registered(dev);
5262 dev->reg_state = NETREG_UNREGISTERED;
5263 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005264 /*
5265 * Prevent userspace races by waiting until the network
5266 * device is fully setup before sending notifications.
5267 */
Patrick McHardya2835762010-02-26 06:34:51 +00005268 if (!dev->rtnl_link_ops ||
5269 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5270 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271
5272out:
5273 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005274
5275err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005276 if (dev->netdev_ops->ndo_uninit)
5277 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005278 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005279}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005280EXPORT_SYMBOL(register_netdevice);
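
/*
 * Editor's illustrative sketch: rtnl_link ->newlink() handlers already
 * run with RTNL held (rtnetlink takes it), so they call
 * register_netdevice() directly instead of register_netdev().
 * "example_newlink" is an invented name.
 */
static int example_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = register_netdevice(dev);	/* RTNL held by the rtnetlink core */
	if (err)
		return err;

	netif_carrier_off(dev);		/* no link until a peer shows up */
	return 0;
}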
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281
5282/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005283 * init_dummy_netdev - init a dummy network device for NAPI
5284 * @dev: device to init
5285 *
5286 * This takes a network device structure and initializes the minimum
5287 * number of fields so it can be used to schedule NAPI polls without
5288 * registering a full blown interface. This is to be used by drivers
5289 * that need to tie several hardware interfaces to a single NAPI
5290 * poll scheduler due to HW limitations.
5291 */
5292int init_dummy_netdev(struct net_device *dev)
5293{
5294 /* Clear everything. Note we don't initialize spinlocks
5295	 * as they aren't supposed to be taken by any of the
5296 * NAPI code and this dummy netdev is supposed to be
5297 * only ever used for NAPI polls
5298 */
5299 memset(dev, 0, sizeof(struct net_device));
5300
5301 /* make sure we BUG if trying to hit standard
5302 * register/unregister code path
5303 */
5304 dev->reg_state = NETREG_DUMMY;
5305
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005306 /* NAPI wants this */
5307 INIT_LIST_HEAD(&dev->napi_list);
5308
5309 /* a dummy interface is started by default */
5310 set_bit(__LINK_STATE_PRESENT, &dev->state);
5311 set_bit(__LINK_STATE_START, &dev->state);
5312
Eric Dumazet29b44332010-10-11 10:22:12 +00005313	/* Note: We don't allocate pcpu_refcnt for dummy devices,
5314	 * because users of this 'device' don't need to change
5315 * its refcount.
5316 */
5317
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005318 return 0;
5319}
5320EXPORT_SYMBOL_GPL(init_dummy_netdev);
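
/*
 * Editor's illustrative sketch: a driver with several hardware channels
 * behind one logical interface hangs its extra NAPI contexts off dummy
 * netdevs.  All "example_*" names are invented; init_dummy_netdev() and
 * netif_napi_add() are the real calls shown.
 */
struct example_chan {
	struct net_device dummy;	/* never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* ... consume up to @budget packets from this channel ... */
	napi_complete(napi);
	return 0;
}

static void example_chan_init(struct example_chan *chan)
{
	init_dummy_netdev(&chan->dummy);
	netif_napi_add(&chan->dummy, &chan->napi, example_poll, 64);
	napi_enable(&chan->napi);
}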
5321
5322
5323/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324 * register_netdev - register a network device
5325 * @dev: device to register
5326 *
5327 * Take a completed network device structure and add it to the kernel
5328 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5329 * chain. 0 is returned on success. A negative errno code is returned
5330 * on a failure to set up the device, or if the name is a duplicate.
5331 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005332 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 * and expands the device name if you passed a format string to
5334 * alloc_netdev.
5335 */
5336int register_netdev(struct net_device *dev)
5337{
5338 int err;
5339
5340 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 rtnl_unlock();
5343 return err;
5344}
5345EXPORT_SYMBOL(register_netdev);
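
/*
 * Editor's illustrative sketch: the usual module-init pattern around
 * register_netdev(), which takes RTNL itself.  "example_*" names are
 * invented, and the empty ops struct stands in for a real driver's
 * ndo callbacks.
 */
static const struct net_device_ops example_netdev_ops;	/* ndo callbacks elided */
static struct net_device *example_dev;

static int __init example_module_init(void)
{
	int err;

	example_dev = alloc_etherdev(0);	/* no private area needed */
	if (!example_dev)
		return -ENOMEM;
	example_dev->netdev_ops = &example_netdev_ops;

	err = register_netdev(example_dev);
	if (err) {
		free_netdev(example_dev);
		return err;
	}
	return 0;
}

static void __exit example_module_exit(void)
{
	unregister_netdev(example_dev);
	free_netdev(example_dev);
}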
5346
Eric Dumazet29b44332010-10-11 10:22:12 +00005347int netdev_refcnt_read(const struct net_device *dev)
5348{
5349 int i, refcnt = 0;
5350
5351 for_each_possible_cpu(i)
5352 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5353 return refcnt;
5354}
5355EXPORT_SYMBOL(netdev_refcnt_read);
5356
Ben Hutchings2c530402012-07-10 10:55:09 +00005357/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005358 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005359 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360 *
5361 * This is called when unregistering network devices.
5362 *
5363 * Any protocol or device that holds a reference should register
5364 * for netdevice notification, and cleanup and put back the
5365 * reference if they receive an UNREGISTER event.
5366 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005367 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368 */
5369static void netdev_wait_allrefs(struct net_device *dev)
5370{
5371 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00005372 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373
Eric Dumazete014deb2009-11-17 05:59:21 +00005374 linkwatch_forget_dev(dev);
5375
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00005377 refcnt = netdev_refcnt_read(dev);
5378
5379 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005380 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005381 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005382
5383 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005384 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385
Eric Dumazet748e2d92012-08-22 21:50:59 +00005386 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005387 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00005388 rtnl_lock();
5389
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005390 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5392 &dev->state)) {
5393 /* We must not have linkwatch events
5394 * pending on unregister. If this
5395 * happens, we simply run the queue
5396 * unscheduled, resulting in a noop
5397 * for this device.
5398 */
5399 linkwatch_run_queue();
5400 }
5401
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005402 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403
5404 rebroadcast_time = jiffies;
5405 }
5406
5407 msleep(250);
5408
Eric Dumazet29b44332010-10-11 10:22:12 +00005409 refcnt = netdev_refcnt_read(dev);
5410
Linus Torvalds1da177e2005-04-16 15:20:36 -07005411 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005412 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5413 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 warning_time = jiffies;
5415 }
5416 }
5417}
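
/*
 * Editor's illustrative sketch of the reference protocol described
 * above: a subsystem that caches a net_device pointer must dev_hold()
 * it and release it from its NETDEV_UNREGISTER handler, or the loop
 * above will log "waiting for %s to become free" forever.
 * "example_cached_dev" is an invented static.
 */
static struct net_device *example_cached_dev;

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);		/* drop our cached reference */
	}
	return NOTIFY_DONE;
}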
5418
5419/* The sequence is:
5420 *
5421 * rtnl_lock();
5422 * ...
5423 * register_netdevice(x1);
5424 * register_netdevice(x2);
5425 * ...
5426 * unregister_netdevice(y1);
5427 * unregister_netdevice(y2);
5428 * ...
5429 * rtnl_unlock();
5430 * free_netdev(y1);
5431 * free_netdev(y2);
5432 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005433 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005434 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005435 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 * without deadlocking with linkwatch via keventd.
5437 * 2) Since we run with the RTNL semaphore not held, we can sleep
5438 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005439 *
5440 * We must not return until all unregister events added during
5441 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443void netdev_run_todo(void)
5444{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005445 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005448 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005449
5450 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005451
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005452
5453 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00005454 if (!list_empty(&list))
5455 rcu_barrier();
5456
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457 while (!list_empty(&list)) {
5458 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005459 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460 list_del(&dev->todo_list);
5461
Eric Dumazet748e2d92012-08-22 21:50:59 +00005462 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005463 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00005464 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005465
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005466 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005467 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005469 dump_stack();
5470 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005472
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005473 dev->reg_state = NETREG_UNREGISTERED;
5474
Changli Gao152102c2010-03-30 20:16:22 +00005475 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005476
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005477 netdev_wait_allrefs(dev);
5478
5479 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00005480 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00005481 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5482 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005483 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005484
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005485 if (dev->destructor)
5486 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005487
5488 /* Free network device */
5489 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005491}
5492
Ben Hutchings3cfde792010-07-09 09:11:52 +00005493/* Convert net_device_stats to rtnl_link_stats64. They have the same
5494 * fields in the same order, with only the type differing.
5495 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005496void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5497 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00005498{
5499#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005500 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5501 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00005502#else
5503 size_t i, n = sizeof(*stats64) / sizeof(u64);
5504 const unsigned long *src = (const unsigned long *)netdev_stats;
5505 u64 *dst = (u64 *)stats64;
5506
5507 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5508 sizeof(*stats64) / sizeof(u64));
5509 for (i = 0; i < n; i++)
5510 dst[i] = src[i];
5511#endif
5512}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005513EXPORT_SYMBOL(netdev_stats_to_stats64);
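
/*
 * Editor's illustrative sketch: a driver that still accounts into the
 * legacy dev->stats can convert it in its ndo_get_stats64() and then
 * layer its own 64-bit counters on top.  "example_stats_priv" and the
 * counter are invented.
 */
struct example_stats_priv {
	atomic_long_t rx_csum_errors;
};

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
	struct example_stats_priv *priv = netdev_priv(dev);

	netdev_stats_to_stats64(storage, &dev->stats);
	storage->rx_errors += atomic_long_read(&priv->rx_csum_errors);
	return storage;
}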
Ben Hutchings3cfde792010-07-09 09:11:52 +00005514
Eric Dumazetd83345a2009-11-16 03:36:51 +00005515/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005516 * dev_get_stats - get network device statistics
5517 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07005518 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005519 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00005520 * Get network statistics from device. Return @storage.
5521 * The device driver may provide its own method by setting
5522 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5523 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005524 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00005525struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5526 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005527{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005528 const struct net_device_ops *ops = dev->netdev_ops;
5529
Eric Dumazet28172732010-07-07 14:58:56 -07005530 if (ops->ndo_get_stats64) {
5531 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005532 ops->ndo_get_stats64(dev, storage);
5533 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00005534 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005535 } else {
5536 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07005537 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005538 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07005539 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07005540}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005541EXPORT_SYMBOL(dev_get_stats);
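
/*
 * Editor's illustrative sketch: dev_get_stats() fills caller-provided
 * storage, so an on-stack snapshot suffices; the caller only needs to
 * keep @dev alive across the call.  "example_log_stats" is an invented
 * name.
 */
static void example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx_packets=%llu rx_dropped=%llu\n",
		    stats.rx_packets, stats.rx_dropped);
}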
Rusty Russellc45d2862007-03-28 14:29:08 -07005542
Eric Dumazet24824a02010-10-02 06:11:55 +00005543struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07005544{
Eric Dumazet24824a02010-10-02 06:11:55 +00005545 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07005546
Eric Dumazet24824a02010-10-02 06:11:55 +00005547#ifdef CONFIG_NET_CLS_ACT
5548 if (queue)
5549 return queue;
5550 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5551 if (!queue)
5552 return NULL;
5553 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00005554 queue->qdisc = &noop_qdisc;
5555 queue->qdisc_sleeping = &noop_qdisc;
5556 rcu_assign_pointer(dev->ingress_queue, queue);
5557#endif
5558 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07005559}
5560
Eric Dumazet2c60db02012-09-16 09:17:26 +00005561static const struct ethtool_ops default_ethtool_ops;
5562
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00005563void netdev_set_default_ethtool_ops(struct net_device *dev,
5564 const struct ethtool_ops *ops)
5565{
5566 if (dev->ethtool_ops == &default_ethtool_ops)
5567 dev->ethtool_ops = ops;
5568}
5569EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
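
/*
 * Editor's illustrative sketch: a mid-layer installs fallback ethtool
 * ops that take effect only when the hardware driver has not set
 * dev->ethtool_ops itself.  The "example_*" names are invented;
 * ethtool_op_get_link() is a real generic helper.
 */
static const struct ethtool_ops example_fallback_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

static void example_midlayer_setup(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &example_fallback_ethtool_ops);
}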
5570
Linus Torvalds1da177e2005-04-16 15:20:36 -07005571/**
Tom Herbert36909ea2011-01-09 19:36:31 +00005572 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005573 * @sizeof_priv: size of private data to allocate space for
5574 * @name: device name format string
5575 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00005576 * @txqs: the number of TX subqueues to allocate
5577 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005578 *
5579 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005580 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00005581 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582 */
Tom Herbert36909ea2011-01-09 19:36:31 +00005583struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5584 void (*setup)(struct net_device *),
5585 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005586{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005588 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005589 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005590
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005591 BUG_ON(strlen(name) >= sizeof(dev->name));
5592
Tom Herbert36909ea2011-01-09 19:36:31 +00005593 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005594 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00005595 return NULL;
5596 }
5597
Tom Herbert36909ea2011-01-09 19:36:31 +00005598#ifdef CONFIG_RPS
5599 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005600 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00005601 return NULL;
5602 }
5603#endif
5604
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005605 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005606 if (sizeof_priv) {
5607 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005608 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005609 alloc_size += sizeof_priv;
5610 }
5611 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005612 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005613
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005614 p = kzalloc(alloc_size, GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005615 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005616 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005617
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005618 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005619 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005620
Eric Dumazet29b44332010-10-11 10:22:12 +00005621 dev->pcpu_refcnt = alloc_percpu(int);
5622 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00005623 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005624
Linus Torvalds1da177e2005-04-16 15:20:36 -07005625 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00005626 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005627
Jiri Pirko22bedad32010-04-01 21:22:57 +00005628 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005629 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00005630
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005631 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005632
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005633 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00005634 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005635
Herbert Xud565b0a2008-12-15 23:38:52 -08005636 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005637 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005638 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005639 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005640 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005641 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005642
5643 dev->num_tx_queues = txqs;
5644 dev->real_num_tx_queues = txqs;
5645 if (netif_alloc_netdev_queues(dev))
5646 goto free_all;
5647
5648#ifdef CONFIG_RPS
5649 dev->num_rx_queues = rxqs;
5650 dev->real_num_rx_queues = rxqs;
5651 if (netif_alloc_rx_queues(dev))
5652 goto free_all;
5653#endif
5654
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005656 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00005657 if (!dev->ethtool_ops)
5658 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005660
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005661free_all:
5662 free_netdev(dev);
5663 return NULL;
5664
Eric Dumazet29b44332010-10-11 10:22:12 +00005665free_pcpu:
5666 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00005667 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00005668#ifdef CONFIG_RPS
5669 kfree(dev->_rx);
5670#endif
5671
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005672free_p:
5673 kfree(p);
5674 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675}
Tom Herbert36909ea2011-01-09 19:36:31 +00005676EXPORT_SYMBOL(alloc_netdev_mqs);
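
/*
 * Editor's illustrative sketch: allocating an 8-TX/8-RX-queue device
 * with a private area and a custom setup callback.  The "example_*"
 * names are invented; ether_setup() is the stock Ethernet initializer.
 */
struct example_mq_priv {
	spinlock_t lock;
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);		/* sane Ethernet defaults */
}

static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_mq_priv), "ex%d",
				example_setup, 8, 8);
}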
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677
5678/**
5679 * free_netdev - free network device
5680 * @dev: device
5681 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005682 * This function does the last stage of destroying an allocated device
5683 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005684 * If this is the last reference then it will be freed.
5685 */
5686void free_netdev(struct net_device *dev)
5687{
Herbert Xud565b0a2008-12-15 23:38:52 -08005688 struct napi_struct *p, *n;
5689
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005690 release_net(dev_net(dev));
5691
David S. Millere8a04642008-07-17 00:34:19 -07005692 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00005693#ifdef CONFIG_RPS
5694 kfree(dev->_rx);
5695#endif
David S. Millere8a04642008-07-17 00:34:19 -07005696
Eric Dumazet33d480c2011-08-11 19:30:52 +00005697 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00005698
Jiri Pirkof001fde2009-05-05 02:48:28 +00005699 /* Flush device addresses */
5700 dev_addr_flush(dev);
5701
Herbert Xud565b0a2008-12-15 23:38:52 -08005702 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5703 netif_napi_del(p);
5704
Eric Dumazet29b44332010-10-11 10:22:12 +00005705 free_percpu(dev->pcpu_refcnt);
5706 dev->pcpu_refcnt = NULL;
5707
Stephen Hemminger3041a062006-05-26 13:25:24 -07005708 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005709 if (dev->reg_state == NETREG_UNINITIALIZED) {
5710 kfree((char *)dev - dev->padded);
5711 return;
5712 }
5713
5714 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5715 dev->reg_state = NETREG_RELEASED;
5716
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005717 /* will free via device release */
5718 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005720EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005721
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005722/**
5723 * synchronize_net - Synchronize with packet receive processing
5724 *
5725 * Wait for packets currently being received to be done.
5726 * Does not block later packets from starting.
5727 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005728void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005729{
5730 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00005731 if (rtnl_is_locked())
5732 synchronize_rcu_expedited();
5733 else
5734 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005735}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005736EXPORT_SYMBOL(synchronize_net);
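
/*
 * Editor's illustrative sketch: the classic unhook-then-wait teardown
 * this helper supports.  __dev_remove_pack() returns before in-flight
 * receivers are done, so synchronize_net() must run before the
 * handler's data is freed.  "example_pt" is an invented static that is
 * assumed to have been registered with dev_add_pack() earlier.
 */
static struct packet_type example_pt;

static void example_unhook(void)
{
	__dev_remove_pack(&example_pt);
	synchronize_net();	/* all CPUs have left the old handler */
	/* memory referenced by example_pt.func may now be freed */
}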
Linus Torvalds1da177e2005-04-16 15:20:36 -07005737
5738/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005739 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005740 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005741 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005742 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005743 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005744 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005745 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005746 *
5747 * Callers must hold the rtnl semaphore. You may want
5748 * unregister_netdev() instead of this.
5749 */
5750
Eric Dumazet44a08732009-10-27 07:03:04 +00005751void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005752{
Herbert Xua6620712007-12-12 19:21:56 -08005753 ASSERT_RTNL();
5754
Eric Dumazet44a08732009-10-27 07:03:04 +00005755 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005756 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005757 } else {
5758 rollback_registered(dev);
5759 /* Finish processing unregister after unlock */
5760 net_set_todo(dev);
5761 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005762}
Eric Dumazet44a08732009-10-27 07:03:04 +00005763EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005764
5765/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005766 * unregister_netdevice_many - unregister many devices
5767 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005768 */
5769void unregister_netdevice_many(struct list_head *head)
5770{
5771 struct net_device *dev;
5772
5773 if (!list_empty(head)) {
5774 rollback_registered_many(head);
5775 list_for_each_entry(dev, head, unreg_list)
5776 net_set_todo(dev);
5777 }
5778}
Eric Dumazet63c80992009-10-27 07:06:49 +00005779EXPORT_SYMBOL(unregister_netdevice_many);
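
/*
 * Editor's illustrative sketch: queueing several devices and
 * unregistering them with a single grace period, the same pattern
 * default_device_exit_batch() below uses.  "example_flush_net" is an
 * invented name; RTNL must be held across both calls.
 */
static void example_flush_net(struct net *net)
{
	struct net_device *dev;
	LIST_HEAD(kill_list);

	rtnl_lock();
	for_each_netdev(net, dev)
		unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}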
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005780
5781/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005782 * unregister_netdev - remove device from the kernel
5783 * @dev: device
5784 *
5785 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005786 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005787 *
5788 * This is just a wrapper for unregister_netdevice that takes
5789 * the rtnl semaphore. In general you want to use this and not
5790 * unregister_netdevice.
5791 */
5792void unregister_netdev(struct net_device *dev)
5793{
5794 rtnl_lock();
5795 unregister_netdevice(dev);
5796 rtnl_unlock();
5797}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798EXPORT_SYMBOL(unregister_netdev);
5799
Eric W. Biedermance286d32007-09-12 13:53:49 +02005800/**
5801 * dev_change_net_namespace - move device to a different network namespace
5802 * @dev: device
5803 * @net: network namespace
5804 * @pat: if not NULL, name pattern to try if the current device name
5805 * is already taken in the destination network namespace.
5806 *
5807 * This function shuts down a device interface and moves it
5808 * to a new network namespace. On success 0 is returned, on
5809 * a failure a negative errno code is returned.
5810 *
5811 * Callers must hold the rtnl semaphore.
5812 */
5813
5814int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5815{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005816 int err;
5817
5818 ASSERT_RTNL();
5819
5820 /* Don't allow namespace local devices to be moved. */
5821 err = -EINVAL;
5822 if (dev->features & NETIF_F_NETNS_LOCAL)
5823 goto out;
5824
5825	/* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02005826 if (dev->reg_state != NETREG_REGISTERED)
5827 goto out;
5828
5829	/* Get out if there is nothing to do */
5830 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005831 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005832 goto out;
5833
5834 /* Pick the destination device name, and ensure
5835 * we can use it in the destination network namespace.
5836 */
5837 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005838 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005839 /* We get here if we can't use the current device name */
5840 if (!pat)
5841 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00005842 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005843 goto out;
5844 }
5845
5846 /*
5847	 * And now a mini version of register_netdevice() and unregister_netdevice().
5848 */
5849
5850 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005851 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005852
5853 /* And unlink it from device chain */
5854 err = -ENODEV;
5855 unlist_netdevice(dev);
5856
5857 synchronize_net();
5858
5859 /* Shutdown queueing discipline. */
5860 dev_shutdown(dev);
5861
5862	/* Notify protocols that we are about to destroy
5863	   this device. They should clean up all of their state.
David Lamparter3b27e102010-09-17 03:22:19 +00005864
5865	   Note that dev->reg_state stays at NETREG_REGISTERED.
5866	   This is intentional: this way 8021q and macvlan know
5867	   the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02005868 */
5869 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00005870 rcu_barrier();
5871 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00005872 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005873
5874 /*
5875 * Flush the unicast and multicast chains
5876 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005877 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005878 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005879
Serge Hallyn4e66ae22012-12-03 16:17:12 +00005880 /* Send a netdev-removed uevent to the old namespace */
5881 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
5882
Eric W. Biedermance286d32007-09-12 13:53:49 +02005883 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005884 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005885
Eric W. Biedermance286d32007-09-12 13:53:49 +02005886 /* If there is an ifindex conflict assign a new one */
5887 if (__dev_get_by_index(net, dev->ifindex)) {
5888 int iflink = (dev->iflink == dev->ifindex);
5889 dev->ifindex = dev_new_index(net);
5890 if (iflink)
5891 dev->iflink = dev->ifindex;
5892 }
5893
Serge Hallyn4e66ae22012-12-03 16:17:12 +00005894 /* Send a netdev-add uevent to the new namespace */
5895 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
5896
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005897 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07005898 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005899 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005900
5901 /* Add the device back in the hashes */
5902 list_netdevice(dev);
5903
5904 /* Notify protocols, that a new device appeared. */
5905 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5906
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005907 /*
5908 * Prevent userspace races by waiting until the network
5909 * device is fully setup before sending notifications.
5910 */
5911 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5912
Eric W. Biedermance286d32007-09-12 13:53:49 +02005913 synchronize_net();
5914 err = 0;
5915out:
5916 return err;
5917}
Johannes Berg463d0182009-07-14 00:33:35 +02005918EXPORT_SYMBOL_GPL(dev_change_net_namespace);
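
/*
 * Editor's illustrative sketch: moving a device into the namespace of
 * a given process, roughly what "ip link set DEV netns PID" ends up
 * doing.  "example_move_dev" is an invented name; get_net_ns_by_pid()
 * and put_net() are the real namespace lookup/release helpers.
 */
static int example_move_dev(struct net_device *dev, pid_t pid)
{
	struct net *net = get_net_ns_by_pid(pid);
	int err;

	if (IS_ERR(net))
		return PTR_ERR(net);

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	put_net(net);
	return err;
}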
Eric W. Biedermance286d32007-09-12 13:53:49 +02005919
Linus Torvalds1da177e2005-04-16 15:20:36 -07005920static int dev_cpu_callback(struct notifier_block *nfb,
5921 unsigned long action,
5922 void *ocpu)
5923{
5924 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005925 struct sk_buff *skb;
5926 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5927 struct softnet_data *sd, *oldsd;
5928
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005929 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930 return NOTIFY_OK;
5931
5932 local_irq_disable();
5933 cpu = smp_processor_id();
5934 sd = &per_cpu(softnet_data, cpu);
5935 oldsd = &per_cpu(softnet_data, oldcpu);
5936
5937 /* Find end of our completion_queue. */
5938 list_skb = &sd->completion_queue;
5939 while (*list_skb)
5940 list_skb = &(*list_skb)->next;
5941 /* Append completion queue from offline CPU. */
5942 *list_skb = oldsd->completion_queue;
5943 oldsd->completion_queue = NULL;
5944
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00005946 if (oldsd->output_queue) {
5947 *sd->output_queue_tailp = oldsd->output_queue;
5948 sd->output_queue_tailp = oldsd->output_queue_tailp;
5949 oldsd->output_queue = NULL;
5950 oldsd->output_queue_tailp = &oldsd->output_queue;
5951 }
Heiko Carstens264524d2011-06-06 20:50:03 +00005952 /* Append NAPI poll list from offline CPU. */
5953 if (!list_empty(&oldsd->poll_list)) {
5954 list_splice_init(&oldsd->poll_list, &sd->poll_list);
5955 raise_softirq_irqoff(NET_RX_SOFTIRQ);
5956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005957
5958 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5959 local_irq_enable();
5960
5961 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00005962 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
5963 netif_rx(skb);
5964 input_queue_head_incr(oldsd);
5965 }
Tom Herbertfec5e652010-04-16 16:01:27 -07005966 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005967 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00005968 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07005969 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005970
5971 return NOTIFY_OK;
5972}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973
5974
Herbert Xu7f353bf2007-08-10 15:47:58 -07005975/**
Herbert Xub63365a2008-10-23 01:11:29 -07005976 * netdev_increment_features - increment feature set by one
5977 * @all: current feature set
5978 * @one: new feature set
5979 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005980 *
5981 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005982 * @one to the master device with current feature set @all. Will not
5983 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005984 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005985netdev_features_t netdev_increment_features(netdev_features_t all,
5986 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005987{
Michał Mirosław1742f182011-04-22 06:31:16 +00005988 if (mask & NETIF_F_GEN_CSUM)
5989 mask |= NETIF_F_ALL_CSUM;
5990 mask |= NETIF_F_VLAN_CHALLENGED;
5991
5992 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
5993 all &= one | ~NETIF_F_ALL_FOR_ALL;
5994
Michał Mirosław1742f182011-04-22 06:31:16 +00005995 /* If one device supports hw checksumming, set for all. */
5996 if (all & NETIF_F_GEN_CSUM)
5997 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07005998
5999 return all;
6000}
Herbert Xub63365a2008-10-23 01:11:29 -07006001EXPORT_SYMBOL(netdev_increment_features);
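
/*
 * Editor's illustrative sketch: a master device folding each slave's
 * feature set into its own, the way bonding recomputes features when
 * slaves come and go.  EXAMPLE_MASTER_FEATURES and the function name
 * are invented.
 */
#define EXAMPLE_MASTER_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA)

static netdev_features_t example_compute_features(struct net_device *slaves[],
						  int n)
{
	netdev_features_t features = EXAMPLE_MASTER_FEATURES;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     EXAMPLE_MASTER_FEATURES);
	return features;
}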
Herbert Xu7f353bf2007-08-10 15:47:58 -07006002
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006003static struct hlist_head *netdev_create_hash(void)
6004{
6005 int i;
6006 struct hlist_head *hash;
6007
6008 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6009 if (hash != NULL)
6010 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6011 INIT_HLIST_HEAD(&hash[i]);
6012
6013 return hash;
6014}
6015
Eric W. Biederman881d9662007-09-17 11:56:21 -07006016/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006017static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006018{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006019 if (net != &init_net)
6020 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006021
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006022 net->dev_name_head = netdev_create_hash();
6023 if (net->dev_name_head == NULL)
6024 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006025
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006026 net->dev_index_head = netdev_create_hash();
6027 if (net->dev_index_head == NULL)
6028 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006029
6030 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006031
6032err_idx:
6033 kfree(net->dev_name_head);
6034err_name:
6035 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006036}
6037
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006038/**
6039 * netdev_drivername - network driver for the device
6040 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006041 *
6042 * Determine network driver for device.
6043 */
David S. Miller3019de12011-06-06 16:41:33 -07006044const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006045{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006046 const struct device_driver *driver;
6047 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006048 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006049
6050 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006051 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006052 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006053
6054 driver = parent->driver;
6055 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006056 return driver->name;
6057 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006058}
6059
Joe Perchesb004ff42012-09-12 20:12:19 -07006060static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006061 struct va_format *vaf)
6062{
6063 int r;
6064
Joe Perchesb004ff42012-09-12 20:12:19 -07006065 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006066 r = dev_printk_emit(level[1] - '0',
6067 dev->dev.parent,
6068 "%s %s %s: %pV",
6069 dev_driver_string(dev->dev.parent),
6070 dev_name(dev->dev.parent),
6071 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006072 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00006073 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006074 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006075 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006076 }
Joe Perches256df2f2010-06-27 01:02:35 +00006077
6078 return r;
6079}
6080
6081int netdev_printk(const char *level, const struct net_device *dev,
6082 const char *format, ...)
6083{
6084 struct va_format vaf;
6085 va_list args;
6086 int r;
6087
6088 va_start(args, format);
6089
6090 vaf.fmt = format;
6091 vaf.va = &args;
6092
6093 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006094
Joe Perches256df2f2010-06-27 01:02:35 +00006095 va_end(args);
6096
6097 return r;
6098}
6099EXPORT_SYMBOL(netdev_printk);
6100
6101#define define_netdev_printk_level(func, level) \
6102int func(const struct net_device *dev, const char *fmt, ...) \
6103{ \
6104 int r; \
6105 struct va_format vaf; \
6106 va_list args; \
6107 \
6108 va_start(args, fmt); \
6109 \
6110 vaf.fmt = fmt; \
6111 vaf.va = &args; \
6112 \
6113 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07006114 \
Joe Perches256df2f2010-06-27 01:02:35 +00006115 va_end(args); \
6116 \
6117 return r; \
6118} \
6119EXPORT_SYMBOL(func);
6120
6121define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6122define_netdev_printk_level(netdev_alert, KERN_ALERT);
6123define_netdev_printk_level(netdev_crit, KERN_CRIT);
6124define_netdev_printk_level(netdev_err, KERN_ERR);
6125define_netdev_printk_level(netdev_warn, KERN_WARNING);
6126define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6127define_netdev_printk_level(netdev_info, KERN_INFO);
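
/*
 * Editor's illustrative sketch: the generated helpers are used like
 * printk() but automatically prefix driver, bus and interface names.
 * "example_report_link" is an invented name.
 */
static void example_report_link(struct net_device *dev, bool up, int speed)
{
	if (up)
		netdev_info(dev, "link up, %d Mbps\n", speed);
	else
		netdev_warn(dev, "link down\n");
}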
6128
Pavel Emelyanov46650792007-10-08 20:38:39 -07006129static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006130{
6131 kfree(net->dev_name_head);
6132 kfree(net->dev_index_head);
6133}
6134
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006135static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07006136 .init = netdev_init,
6137 .exit = netdev_exit,
6138};
6139
Pavel Emelyanov46650792007-10-08 20:38:39 -07006140static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006141{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006142 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02006143 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006144 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02006145 * initial network namespace
6146 */
6147 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006148 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006149 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006150 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02006151
6152		/* Ignore unmovable devices (e.g. loopback) */
6153 if (dev->features & NETIF_F_NETNS_LOCAL)
6154 continue;
6155
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006156 /* Leave virtual devices for the generic cleanup */
6157 if (dev->rtnl_link_ops)
6158 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08006159
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006160 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006161 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6162 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006163 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006164 pr_emerg("%s: failed to move %s to init_net: %d\n",
6165 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006166 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02006167 }
6168 }
6169 rtnl_unlock();
6170}
6171
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006172static void __net_exit default_device_exit_batch(struct list_head *net_list)
6173{
6174	/* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04006175 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006176 * Do this across as many network namespaces as possible to
6177 * improve batching efficiency.
6178 */
6179 struct net_device *dev;
6180 struct net *net;
6181 LIST_HEAD(dev_kill_list);
6182
6183 rtnl_lock();
6184 list_for_each_entry(net, net_list, exit_list) {
6185 for_each_netdev_reverse(net, dev) {
6186 if (dev->rtnl_link_ops)
6187 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6188 else
6189 unregister_netdevice_queue(dev, &dev_kill_list);
6190 }
6191 }
6192 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006193 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006194 rtnl_unlock();
6195}
6196
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006197static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006198 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006199 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02006200};
6201
Linus Torvalds1da177e2005-04-16 15:20:36 -07006202/*
6203 * Initialize the DEV module. At boot time this walks the device list and
6204 * unhooks any devices that fail to initialise (normally hardware not
6205 * present) and leaves us with a valid list of present and active devices.
6206 *
6207 */
6208
6209/*
6210 * This is called single threaded during boot, so no need
6211 * to take the rtnl semaphore.
6212 */
6213static int __init net_dev_init(void)
6214{
6215 int i, rc = -ENOMEM;
6216
6217 BUG_ON(!dev_boot_phase);
6218
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219 if (dev_proc_init())
6220 goto out;
6221
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006222 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006223 goto out;
6224
6225 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08006226 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006227 INIT_LIST_HEAD(&ptype_base[i]);
6228
Vlad Yasevich62532da2012-11-15 08:49:10 +00006229 INIT_LIST_HEAD(&offload_base);
6230
Eric W. Biederman881d9662007-09-17 11:56:21 -07006231 if (register_pernet_subsys(&netdev_net_ops))
6232 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006233
6234 /*
6235 * Initialise the packet receive queues.
6236 */
6237
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07006238 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006239 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006240
Changli Gaodee42872010-05-02 05:42:16 +00006241 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006242 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07006243 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006244 sd->completion_queue = NULL;
6245 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00006246 sd->output_queue = NULL;
6247 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00006248#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006249 sd->csd.func = rps_trigger_softirq;
6250 sd->csd.info = sd;
6251 sd->csd.flags = 0;
6252 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07006253#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00006254
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006255 sd->backlog.poll = process_backlog;
6256 sd->backlog.weight = weight_p;
6257 sd->backlog.gro_list = NULL;
6258 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006259 }
6260
Linus Torvalds1da177e2005-04-16 15:20:36 -07006261 dev_boot_phase = 0;
6262
Eric W. Biederman505d4f72008-11-07 22:54:20 -08006263	/* The loopback device is special: if any other network device
6264	 * is present in a network namespace, the loopback device must
6265	 * be present too. Since we now dynamically allocate and free
6266	 * the loopback device, maintain this invariant by keeping the
6267	 * loopback device as the first device on the list of network
6268	 * devices. This ensures the loopback device is the first
6269	 * device that appears and the last network device
6270	 * that disappears.
6271 */
6272 if (register_pernet_device(&loopback_net_ops))
6273 goto out;
6274
6275 if (register_pernet_device(&default_device_ops))
6276 goto out;
6277
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006278 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6279 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006280
6281 hotcpu_notifier(dev_cpu_callback, 0);
6282 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006283 rc = 0;
6284out:
6285 return rc;
6286}
6287
6288subsys_initcall(net_dev_init);