/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain :      Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16. Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS 8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
        return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail(&dev->dev_list, &net->dev_base_head);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
        return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del(&dev->dev_list);
        hlist_del(&dev->name_hlist);
        hlist_del(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
         ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
         ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
         "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
         "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers that mangle input packets
 *      MUST BE last in the hash buckets, and checking of protocol handlers
 *      MUST start from the promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation: if a packet-mangling protocol handler were first on
 *      the list, it could not sense that the packet is cloned and should
 *      be copied-on-write, so it would change it and subsequent readers
 *      would get a broken packet.
 *                                                      --ANK (980803)
 */

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep and therefore cannot guarantee that CPUs
 *      in the middle of receiving packets will see the new packet type
 *      (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        spin_lock_bh(&ptype_lock);
        if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
        else {
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
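
/*
 * Illustrative sketch (not part of the kernel tree): a module that taps
 * IPv4 frames might pair dev_add_pack() with dev_remove_pack() as in the
 * hypothetical handler below.  All sample_* names are invented here.
 */
#if 0   /* sketch only, kept out of the build */
static int sample_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
        /* Inspect the frame, then drop our reference to it. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type sample_ipv4_ptype = {
        .type = cpu_to_be16(ETH_P_IP),
        .func = sample_ipv4_rcv,
};

/* module init: dev_add_pack(&sample_ipv4_ptype);
 * module exit: dev_remove_pack(&sample_ipv4_ptype);
 */
#endif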

/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPU's have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head;
        struct packet_type *pt1;

        spin_lock_bh(&ptype_lock);

        if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
        else
                head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine to
 *      all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
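
/*
 * Example (values are illustrative): booting with
 *
 *      netdev=9,0x300,0,0,eth0
 *
 * stores irq 9 and I/O base 0x300 for "eth0" via netdev_boot_setup_add();
 * netdev_boot_setup_check() later copies them into the device when the
 * driver probes eth0.
 */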

/*******************************************************************************

                          Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_name_hash(net, name)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, name_hlist);
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;
        }
        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
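
/*
 * Illustrative sketch (not part of this file): every successful
 * dev_get_by_name() must be paired with dev_put() once the caller is
 * done with the device.
 */
#if 0   /* sketch only, kept out of the build */
{
        struct net_device *dev = dev_get_by_name(&init_net, "eth0");

        if (dev) {
                /* ... use dev safely here ... */
                dev_put(dev);           /* release the reference */
        }
}
#endif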

/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_index_hash(net, ifindex)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, index_hlist);
                if (dev->ifindex == ifindex)
                        return dev;
        }
        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);


/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *      dev_getbyhwaddr - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device. The caller must hold the
 *      rtnl semaphore. The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking
 *
 *      BUGS:
 *      If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        rtnl_lock();
        dev = __dev_getfirstbyhwtype(net, type);
        if (dev)
                dev_hold(dev);
        rtnl_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
                                    unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        read_lock(&dev_base_lock);
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        }
        read_unlock(&dev_base_lock);
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
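
/*
 * Illustrative sketch: @mask selects which flag bits must match @if_flags,
 * so asking for IFF_UP in both finds the first interface that is up.
 */
#if 0   /* sketch only, kept out of the build */
{
        struct net_device *dev = dev_get_by_flags(&init_net, IFF_UP, IFF_UP);

        if (dev)
                dev_put(dev);
}
#endif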

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0')
                return 0;
        if (strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
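
/*
 * Illustrative sketch: a driver wanting conventional unit naming hands
 * dev_alloc_name() a format string and lets it pick the first free slot.
 */
#if 0   /* sketch only, kept out of the build */
{
        int unit = dev_alloc_name(dev, "eth%d");        /* e.g. yields "eth2" */

        if (unit < 0)
                return unit;    /* -EINVAL or -ENFILE */
}
#endif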

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change name of a device, can pass format strings "eth%d"
 *      for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (!dev_valid_name(newname))
                return -EINVAL;

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
                return 0;

        memcpy(oldname, dev->name, IFNAMSIZ);

        if (strchr(newname, '%')) {
                err = dev_alloc_name(dev, newname);
                if (err < 0)
                        return err;
        } else if (__dev_get_by_name(net, newname))
                return -EEXIST;
        else
                strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
        /* For now only devices in the initial network namespace
         * are in sysfs.
         */
        if (net == &init_net) {
                ret = device_rename(&dev->dev, dev->name);
                if (ret) {
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        return ret;
                }
        }

        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
                        printk(KERN_ERR
                               "%s: name change rollback failed: %d.\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                if (dev->ifalias) {
                        kfree(dev->ifalias);
                        dev->ifalias = NULL;
                }
                return 0;
        }

        dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!dev->ifalias)
                return -ENOMEM;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}


/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
        call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *      dev_load - load a network module
 *      @net: the applicable net namespace
 *      @name: name of interface
 *
 *      If a network interface is not present and the process has suitable
 *      privileges this function loads the module. If module loading is not
 *      available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        read_unlock(&dev_base_lock);

        if (!dev && capable(CAP_NET_ADMIN))
                request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 *      dev_open - prepare an interface for use.
 *      @dev: device to open
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();

        /*
         *      Is it already up?
         */

        if (dev->flags & IFF_UP)
                return 0;

        /*
         *      Is it even present?
         */
        if (!netif_device_present(dev))
                return -ENODEV;

        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        /*
         *      Call device private open method
         */
        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        /*
         *      If it went open OK then:
         */

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                /*
                 *      Set the flags.
                 */
                dev->flags |= IFF_UP;

                /*
                 *      Enable NET_DMA
                 */
                net_dmaengine_get();

                /*
                 *      Initialize multicasting status
                 */
                dev_set_rx_mode(dev);

                /*
                 *      Wakeup transmit queue engine
                 */
                dev_activate(dev);

                /*
                 *      ... and announce new interface.
                 */
                call_netdevice_notifiers(NETDEV_UP, dev);
        }

        return ret;
}
EXPORT_SYMBOL(dev_open);
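
/*
 * Illustrative sketch: dev_open() asserts the RTNL, so in-kernel callers
 * outside the rtnetlink paths must take the lock around it themselves.
 */
#if 0   /* sketch only, kept out of the build */
{
        int err;

        rtnl_lock();
        err = dev_open(dev);
        rtnl_unlock();
}
#endif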

/**
 *      dev_close - shutdown an interface.
 *      @dev: device to shutdown
 *
 *      This function moves an active device into down state. A
 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *      chain.
 */
int dev_close(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        ASSERT_RTNL();

        might_sleep();

        if (!(dev->flags & IFF_UP))
                return 0;

        /*
         *      Tell people we are going down, so that they can
         *      prepare for it while the device is still operating.
         */
        call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

        clear_bit(__LINK_STATE_START, &dev->state);

        /* Synchronize to scheduled poll. We cannot touch poll list,
         * it can be even on different cpu. So just clear netif_running().
         *
         * dev->stop() will invoke napi_disable() on all of its
         * napi_struct instances on this device.
         */
        smp_mb__after_clear_bit(); /* Commit netif_running(). */

        dev_deactivate(dev);

        /*
         *      Call the device specific close. This cannot fail.
         *      Only if device is UP
         *
         *      We allow it to be called even after a DETACH hot-plug
         *      event.
         */
        if (ops->ndo_stop)
                ops->ndo_stop(dev);

        /*
         *      Device is now down.
         */

        dev->flags &= ~IFF_UP;

        /*
         *      Tell people we are down
         */
        call_netdevice_notifiers(NETDEV_DOWN, dev);

        /*
         *      Shutdown NET_DMA
         */
        net_dmaengine_put();

        return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *      dev_disable_lro - disable Large Receive Offload on a device
 *      @dev: device
 *
 *      Disable Large Receive Offload (LRO) on a net device.  Must be
 *      called under RTNL.  This is needed if received packets may be
 *      forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
        if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
            dev->ethtool_ops->set_flags) {
                u32 flags = dev->ethtool_ops->get_flags(dev);
                if (flags & ETH_FLAG_LRO) {
                        flags &= ~ETH_FLAG_LRO;
                        dev->ethtool_ops->set_flags(dev, flags);
                }
        }
        WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *      Device change register/unregister. These are not inline or static
 *      as we export them to the world.
 */

/**
 *      register_netdevice_notifier - register a network notifier block
 *      @nb: notifier
 *
 *      Register a notifier to be called when network device events occur.
 *      The notifier passed is linked into the kernel structures and must
 *      not be reused until it has been unregistered. A negative errno code
 *      is returned on a failure.
 *
 *      When registered, all registration and up events are replayed
 *      to the new notifier to allow it a race-free
 *      view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net_device *last;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_register(&netdev_chain, nb);
        if (err)
                goto unlock;
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;

                        if (!(dev->flags & IFF_UP))
                                continue;

                        nb->notifier_call(nb, NETDEV_UP, dev);
                }
        }

unlock:
        rtnl_unlock();
        return err;

rollback:
        last = dev;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
                                break;

                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
                }
        }

        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
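
/*
 * Illustrative sketch (hypothetical sample_* names): a subsystem that
 * tracks interfaces registers a notifier block; note that existing
 * devices are replayed as NETDEV_REGISTER/NETDEV_UP at registration.
 */
#if 0   /* sketch only, kept out of the build */
static int sample_netdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;   /* in this era ptr is the device */

        switch (event) {
        case NETDEV_UP:
                printk(KERN_DEBUG "%s is up\n", dev->name);
                break;
        case NETDEV_GOING_DOWN:
                printk(KERN_DEBUG "%s is going down\n", dev->name);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block sample_netdev_nb = {
        .notifier_call = sample_netdev_event,
};

/* module init: register_netdevice_notifier(&sample_netdev_nb);
 * module exit: unregister_netdevice_notifier(&sample_netdev_nb);
 */
#endif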
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297
1298/**
1299 * unregister_netdevice_notifier - unregister a network notifier block
1300 * @nb: notifier
1301 *
1302 * Unregister a notifier previously registered by
1303 * register_netdevice_notifier(). The notifier is unlinked into the
1304 * kernel structures and may then be reused. A negative errno code
1305 * is returned on a failure.
1306 */
1307
1308int unregister_netdevice_notifier(struct notifier_block *nb)
1309{
Herbert Xu9f514952006-03-25 01:24:25 -08001310 int err;
1311
1312 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001313 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001314 rtnl_unlock();
1315 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001317EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
1319/**
1320 * call_netdevice_notifiers - call all network notifier blocks
1321 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001322 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 *
1324 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001325 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 */
1327
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001328int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001330 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331}
1332
1333/* When > 0 there are consumers of rx skb time stamps */
1334static atomic_t netstamp_needed = ATOMIC_INIT(0);
1335
1336void net_enable_timestamp(void)
1337{
1338 atomic_inc(&netstamp_needed);
1339}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001340EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341
1342void net_disable_timestamp(void)
1343{
1344 atomic_dec(&netstamp_needed);
1345}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001346EXPORT_SYMBOL(net_disable_timestamp);
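
/*
 * Editorial sketch: netstamp_needed is a plain reference count, so a
 * hypothetical consumer of rx timestamps must keep the two calls above
 * strictly balanced.
 */
static void example_tap_start(void)
{
	net_enable_timestamp();		/* take a timestamp reference */
}

static void example_tap_stop(void)
{
	net_disable_timestamp();	/* release it again */
}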
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001348static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349{
1350 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001351 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001352 else
1353 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354}
1355
1356/*
1357 * Support routine. Sends outgoing frames to any network
1358 * taps currently in use.
1359 */
1360
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001361static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362{
1363 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001364
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001365#ifdef CONFIG_NET_CLS_ACT
1366 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1367 net_timestamp(skb);
1368#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001369 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001370#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
1372 rcu_read_lock();
1373 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1374 /* Never send packets back to the socket
1375 * they originated from - MvS (miquels@drinkel.ow.org)
1376 */
1377 if ((ptype->dev == dev || !ptype->dev) &&
1378 (ptype->af_packet_priv == NULL ||
1379 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001380 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 if (!skb2)
1382 break;
1383
1384 /* skb->network_header should be correctly
1385 set by the sender, so the second statement is
1386 just protection against buggy protocols.
1387 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001388 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001390 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001391 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 if (net_ratelimit())
1393 printk(KERN_CRIT "protocol %04x is "
1394 "buggy, dev %s\n",
1395 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001396 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 }
1398
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001399 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001401 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 }
1403 }
1404 rcu_read_unlock();
1405}
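
/*
 * Editorial sketch: dev_queue_xmit_nit() clones every outgoing frame to
 * the handlers on &ptype_all.  A tap joins that list by registering a
 * packet_type of type ETH_P_ALL via dev_add_pack(); the "example_" names
 * are hypothetical.
 */
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* skb is our private clone from dev_queue_xmit_nit(); free it. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_tap = {
	.type = cpu_to_be16(ETH_P_ALL),
	.func = example_tap_rcv,
};

/* dev_add_pack(&example_tap) installs the tap, dev_remove_pack() removes it. */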
1406
Denis Vlasenko56079432006-03-29 15:57:29 -08001407
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001408static inline void __netif_reschedule(struct Qdisc *q)
1409{
1410 struct softnet_data *sd;
1411 unsigned long flags;
1412
1413 local_irq_save(flags);
1414 sd = &__get_cpu_var(softnet_data);
1415 q->next_sched = sd->output_queue;
1416 sd->output_queue = q;
1417 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1418 local_irq_restore(flags);
1419}
1420
David S. Miller37437bb2008-07-16 02:15:04 -07001421void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001422{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001423 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1424 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001425}
1426EXPORT_SYMBOL(__netif_schedule);
1427
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001428void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001429{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001430 if (atomic_dec_and_test(&skb->users)) {
1431 struct softnet_data *sd;
1432 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001433
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001434 local_irq_save(flags);
1435 sd = &__get_cpu_var(softnet_data);
1436 skb->next = sd->completion_queue;
1437 sd->completion_queue = skb;
1438 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1439 local_irq_restore(flags);
1440 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001441}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001442EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001443
1444void dev_kfree_skb_any(struct sk_buff *skb)
1445{
1446 if (in_irq() || irqs_disabled())
1447 dev_kfree_skb_irq(skb);
1448 else
1449 dev_kfree_skb(skb);
1450}
1451EXPORT_SYMBOL(dev_kfree_skb_any);
1452
1453
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001454/**
1455 * netif_device_detach - mark device as removed
1456 * @dev: network device
1457 *
1458 * Mark device as removed from the system and therefore no longer available.
1459 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001460void netif_device_detach(struct net_device *dev)
1461{
1462 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1463 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001464 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001465 }
1466}
1467EXPORT_SYMBOL(netif_device_detach);
1468
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001469/**
1470 * netif_device_attach - mark device as attached
1471 * @dev: network device
1472 *
1473 * Mark device as attached to the system and restart its queues if needed.
1474 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001475void netif_device_attach(struct net_device *dev)
1476{
1477 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1478 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001479 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001480 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001481 }
1482}
1483EXPORT_SYMBOL(netif_device_attach);
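
/*
 * Editorial sketch: a PCI driver's power management hooks are the usual
 * callers of the two helpers above.  This pairing is hypothetical and
 * assumes the usual PCI driver context (linux/pci.h); real drivers also
 * save state and quiesce their hardware.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops all tx queues if running */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_attach(dev);	/* wakes queues and the watchdog */
	return 0;
}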
1484
Ben Hutchings6de329e2008-06-16 17:02:28 -07001485static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1486{
1487 return ((features & NETIF_F_GEN_CSUM) ||
1488 ((features & NETIF_F_IP_CSUM) &&
1489 protocol == htons(ETH_P_IP)) ||
1490 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001491 protocol == htons(ETH_P_IPV6)) ||
1492 ((features & NETIF_F_FCOE_CRC) &&
1493 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001494}
1495
1496static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1497{
1498 if (can_checksum_protocol(dev->features, skb->protocol))
1499 return true;
1500
1501 if (skb->protocol == htons(ETH_P_8021Q)) {
1502 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1503 if (can_checksum_protocol(dev->features & dev->vlan_features,
1504 veh->h_vlan_encapsulated_proto))
1505 return true;
1506 }
1507
1508 return false;
1509}
Denis Vlasenko56079432006-03-29 15:57:29 -08001510
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511/*
1512 * Invalidate hardware checksum when packet is to be mangled, and
1513 * complete checksum manually on outgoing path.
1514 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001515int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516{
Al Virod3bc23e2006-11-14 21:24:49 -08001517 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001518 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
Patrick McHardy84fa7932006-08-29 16:44:56 -07001520 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001521 goto out_set_summed;
1522
1523 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001524 /* Let GSO fix up the checksum. */
1525 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 }
1527
Herbert Xua0308472007-10-15 01:47:15 -07001528 offset = skb->csum_start - skb_headroom(skb);
1529 BUG_ON(offset >= skb_headlen(skb));
1530 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1531
1532 offset += skb->csum_offset;
1533 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1534
1535 if (skb_cloned(skb) &&
1536 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1538 if (ret)
1539 goto out;
1540 }
1541
Herbert Xua0308472007-10-15 01:47:15 -07001542 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001543out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001545out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 return ret;
1547}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001548EXPORT_SYMBOL(skb_checksum_help);
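
/*
 * Editorial sketch: a driver whose hardware can only offload checksums
 * for some frames would fall back to skb_checksum_help() for the rest.
 * The capability test below is hypothetical.
 */
static bool example_hw_can_csum(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP);	/* made-up hw limit */
}

static int example_xmit_prep(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !example_hw_can_csum(skb))
		return skb_checksum_help(skb);	/* 0 or negative errno */
	return 0;
}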
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001550/**
1551 * skb_gso_segment - Perform segmentation on skb.
1552 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001553 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001554 *
1555 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001556 *
1557 * It may return NULL if the skb requires no segmentation. This is
1558 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001559 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001560struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001561{
1562 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1563 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001564 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001565 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001566
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001567 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001568 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001569 __skb_pull(skb, skb->mac_len);
1570
Herbert Xu67fd1a72009-01-19 16:26:44 -08001571 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1572 struct net_device *dev = skb->dev;
1573 struct ethtool_drvinfo info = {};
1574
1575 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1576 dev->ethtool_ops->get_drvinfo(dev, &info);
1577
1578 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1579 "ip_summed=%d",
1580 info.driver, dev ? dev->features : 0L,
1581 skb->sk ? skb->sk->sk_route_caps : 0L,
1582 skb->len, skb->data_len, skb->ip_summed);
1583
Herbert Xua430a432006-07-08 13:34:56 -07001584 if (skb_header_cloned(skb) &&
1585 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1586 return ERR_PTR(err);
1587 }
1588
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001589 rcu_read_lock();
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08001590 list_for_each_entry_rcu(ptype,
1591 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001592 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001593 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001594 err = ptype->gso_send_check(skb);
1595 segs = ERR_PTR(err);
1596 if (err || skb_gso_ok(skb, features))
1597 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001598 __skb_push(skb, (skb->data -
1599 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001600 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001601 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001602 break;
1603 }
1604 }
1605 rcu_read_unlock();
1606
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001607 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001608
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001609 return segs;
1610}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001611EXPORT_SYMBOL(skb_gso_segment);
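
/*
 * Editorial sketch: callers outside this file follow the same pattern as
 * dev_gso_segment() below - segment, then hand the resulting list on one
 * skb at a time.  A hypothetical software-GSO fallback:
 */
static int example_sw_gso(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs = skb_gso_segment(skb, dev->features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header verification only */
		return dev_queue_xmit(skb);

	kfree_skb(skb);			/* original superseded by segments */
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		dev_queue_xmit(nskb);	/* consumes nskb */
	}
	return 0;
}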
1612
Herbert Xufb286bb2005-11-10 13:01:24 -08001613/* Take action when hardware reception checksum errors are detected. */
1614#ifdef CONFIG_BUG
1615void netdev_rx_csum_fault(struct net_device *dev)
1616{
1617 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001618 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001619 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001620 dump_stack();
1621 }
1622}
1623EXPORT_SYMBOL(netdev_rx_csum_fault);
1624#endif
1625
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626/* Actually, we should eliminate this check as soon as we know that:
1627 * 1. An IOMMU is present and allows us to map all the memory.
1628 * 2. No high memory really exists on this machine.
1629 */
1630
1631static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1632{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001633#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 int i;
1635
1636 if (dev->features & NETIF_F_HIGHDMA)
1637 return 0;
1638
1639 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1640 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1641 return 1;
1642
Herbert Xu3d3a8532006-06-27 13:33:10 -07001643#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 return 0;
1645}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001647struct dev_gso_cb {
1648 void (*destructor)(struct sk_buff *skb);
1649};
1650
1651#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1652
1653static void dev_gso_skb_destructor(struct sk_buff *skb)
1654{
1655 struct dev_gso_cb *cb;
1656
1657 do {
1658 struct sk_buff *nskb = skb->next;
1659
1660 skb->next = nskb->next;
1661 nskb->next = NULL;
1662 kfree_skb(nskb);
1663 } while (skb->next);
1664
1665 cb = DEV_GSO_CB(skb);
1666 if (cb->destructor)
1667 cb->destructor(skb);
1668}
1669
1670/**
1671 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1672 * @skb: buffer to segment
1673 *
1674 * This function segments the given skb and stores the list of segments
1675 * in skb->next.
1676 */
1677static int dev_gso_segment(struct sk_buff *skb)
1678{
1679 struct net_device *dev = skb->dev;
1680 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001681 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1682 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001683
Herbert Xu576a30e2006-06-27 13:22:38 -07001684 segs = skb_gso_segment(skb, features);
1685
1686 /* Verifying header integrity only. */
1687 if (!segs)
1688 return 0;
1689
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001690 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001691 return PTR_ERR(segs);
1692
1693 skb->next = segs;
1694 DEV_GSO_CB(skb)->destructor = skb->destructor;
1695 skb->destructor = dev_gso_skb_destructor;
1696
1697 return 0;
1698}
1699
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001700int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1701 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001702{
Stephen Hemminger00829822008-11-20 20:14:53 -08001703 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001704 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001705
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001706 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001707 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001708 dev_queue_xmit_nit(skb, dev);
1709
Herbert Xu576a30e2006-06-27 13:22:38 -07001710 if (netif_needs_gso(dev, skb)) {
1711 if (unlikely(dev_gso_segment(skb)))
1712 goto out_kfree_skb;
1713 if (skb->next)
1714 goto gso;
1715 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001716
Eric Dumazet93f154b2009-05-18 22:19:19 -07001717 /*
1718 * If the device doesn't need skb->dst, release it right now while
1719 * it's hot in this CPU's cache.
1720 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001721 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1722 skb_dst_drop(skb);
1723
Patrick Ohlyac45f602009-02-12 05:03:37 +00001724 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001725 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001726 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001727 /*
1728 * TODO: if skb_orphan() was called by
1729 * dev->hard_start_xmit() (for example, the unmodified
1730 * igb driver does that; bnx2 doesn't), then
1731 * skb_tx_software_timestamp() will be unable to send
1732 * back the time stamp.
1733 *
1734 * How can this be prevented? Always create another
1735 * reference to the socket before calling
1736 * dev->hard_start_xmit()? Prevent that skb_orphan()
1737 * does anything in dev->hard_start_xmit() by clearing
1738 * the skb destructor before the call and restoring it
1739 * afterwards, then doing the skb_orphan() ourselves?
1740 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001741 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001742 }
1743
Herbert Xu576a30e2006-06-27 13:22:38 -07001744gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001745 do {
1746 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001747
1748 skb->next = nskb->next;
1749 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001750 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001751 if (unlikely(rc != NETDEV_TX_OK)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001752 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001753 skb->next = nskb;
1754 return rc;
1755 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001756 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001757 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001758 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001759 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001760
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001761 skb->destructor = DEV_GSO_CB(skb)->destructor;
1762
1763out_kfree_skb:
1764 kfree_skb(skb);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001765 return NETDEV_TX_OK;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001766}
1767
David S. Miller70192982009-01-27 16:34:47 -08001768static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001769
Stephen Hemminger92477442009-03-21 13:39:26 -07001770u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001771{
David S. Miller70192982009-01-27 16:34:47 -08001772 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001773
David S. Miller513de112009-05-03 14:43:10 -07001774 if (skb_rx_queue_recorded(skb)) {
1775 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001776 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001777 hash -= dev->real_num_tx_queues;
1778 return hash;
1779 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001780
1781 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001782 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001783 else
David S. Miller70192982009-01-27 16:34:47 -08001784 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001785
David S. Miller70192982009-01-27 16:34:47 -08001786 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001787
David S. Millerb6b2fed2008-07-21 09:48:06 -07001788 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001789}
Stephen Hemminger92477442009-03-21 13:39:26 -07001790EXPORT_SYMBOL(skb_tx_hash);
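
/*
 * Editorial note: the final multiply-shift above maps a 32-bit hash h
 * onto [0, real_num_tx_queues) without a modulo: (h * n) >> 32 scales
 * the range [0, 2^32) down to [0, n).  A hypothetical self-check:
 */
static u16 example_scale_hash(u32 h, u16 n)
{
	u16 q = (u16) (((u64) h * n) >> 32);

	BUG_ON(n && q >= n);	/* the result always lands in [0, n) */
	return q;
}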
David S. Miller8f0f2222008-07-15 03:47:03 -07001791
David S. Millere8a04642008-07-17 00:34:19 -07001792static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1793 struct sk_buff *skb)
1794{
Stephen Hemminger00829822008-11-20 20:14:53 -08001795 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001796 u16 queue_index = 0;
1797
Stephen Hemminger00829822008-11-20 20:14:53 -08001798 if (ops->ndo_select_queue)
1799 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001800 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001801 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001802
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001803 skb_set_queue_mapping(skb, queue_index);
1804 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001805}
1806
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001807static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1808 struct net_device *dev,
1809 struct netdev_queue *txq)
1810{
1811 spinlock_t *root_lock = qdisc_lock(q);
1812 int rc;
1813
1814 spin_lock(root_lock);
1815 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1816 kfree_skb(skb);
1817 rc = NET_XMIT_DROP;
1818 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1819 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1820 /*
1821 * This is a work-conserving queue; there are no old skbs
1822 * waiting to be sent out; and the qdisc is not running -
1823 * xmit the skb directly.
1824 */
1825 __qdisc_update_bstats(q, skb->len);
1826 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1827 __qdisc_run(q);
1828 else
1829 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1830
1831 rc = NET_XMIT_SUCCESS;
1832 } else {
1833 rc = qdisc_enqueue_root(skb, q);
1834 qdisc_run(q);
1835 }
1836 spin_unlock(root_lock);
1837
1838 return rc;
1839}
1840
Dave Jonesd29f7492008-07-22 14:09:06 -07001841/**
1842 * dev_queue_xmit - transmit a buffer
1843 * @skb: buffer to transmit
1844 *
1845 * Queue a buffer for transmission to a network device. The caller must
1846 * have set the device and priority and built the buffer before calling
1847 * this function. The function can be called from an interrupt.
1848 *
1849 * A negative errno code is returned on a failure. A success does not
1850 * guarantee the frame will be transmitted as it may be dropped due
1851 * to congestion or traffic shaping.
1852 *
1853 * -----------------------------------------------------------------------------------
1854 * I notice this method can also return errors from the queue disciplines,
1855 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1856 * be positive.
1857 *
1858 * Regardless of the return value, the skb is consumed, so it is currently
1859 * difficult to retry a send to this method. (You can bump the ref count
1860 * before sending to hold a reference for retry if you are careful.)
1861 *
1862 * When calling this method, interrupts MUST be enabled. This is because
1863 * the BH enable code must have IRQs enabled so that it will not deadlock.
1864 * --BLG
1865 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866int dev_queue_xmit(struct sk_buff *skb)
1867{
1868 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001869 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 struct Qdisc *q;
1871 int rc = -ENOMEM;
1872
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001873 /* GSO will handle the following emulations directly. */
1874 if (netif_needs_gso(dev, skb))
1875 goto gso;
1876
David S. Miller4cf704f2009-06-09 00:18:51 -07001877 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001879 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 goto out_kfree_skb;
1881
1882 /* Fragmented skb is linearized if device does not support SG,
1883 * or if at least one of fragments is in highmem and device
1884 * does not support DMA from it.
1885 */
1886 if (skb_shinfo(skb)->nr_frags &&
1887 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001888 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 goto out_kfree_skb;
1890
1891 /* If packet is not checksummed and device does not support
1892 * checksumming for this protocol, complete checksumming here.
1893 */
Herbert Xu663ead32007-04-09 11:59:07 -07001894 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1895 skb_set_transport_header(skb, skb->csum_start -
1896 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001897 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1898 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001901gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001902 /* Disable soft irqs for various locks below. Also
1903 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001905 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
David S. Millereae792b2008-07-15 03:03:33 -07001907 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001908 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001909
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001911 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912#endif
1913 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001914 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07001915 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 }
1917
1918 /* The device has no queue. Common case for software devices:
1919 loopback, all sorts of tunnels...
1920
Herbert Xu932ff272006-06-09 12:20:56 -07001921 Really, it is unlikely that netif_tx_lock protection is necessary
1922 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 counters.)
1924 However, it is possible that they rely on the protection
1925 provided by us here.
1926
1927 Check this and take the lock. It is not prone to deadlocks.
1928 Either way, the noqueue qdisc path is even simpler 8)
1929 */
1930 if (dev->flags & IFF_UP) {
1931 int cpu = smp_processor_id(); /* ok because BHs are off */
1932
David S. Millerc773e842008-07-08 23:13:53 -07001933 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
David S. Millerc773e842008-07-08 23:13:53 -07001935 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001937 if (!netif_tx_queue_stopped(txq)) {
Krishna Kumar03a9a442009-08-29 20:21:36 +00001938 rc = NET_XMIT_SUCCESS;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001939 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001940 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 goto out;
1942 }
1943 }
David S. Millerc773e842008-07-08 23:13:53 -07001944 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 if (net_ratelimit())
1946 printk(KERN_CRIT "Virtual device %s asks to "
1947 "queue packet!\n", dev->name);
1948 } else {
1949 /* Recursion has been detected! It is possible,
1950 * unfortunately. */
1951 if (net_ratelimit())
1952 printk(KERN_CRIT "Dead loop on virtual device "
1953 "%s, fix it urgently!\n", dev->name);
1954 }
1955 }
1956
1957 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001958 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960out_kfree_skb:
1961 kfree_skb(skb);
1962 return rc;
1963out:
Herbert Xud4828d82006-06-22 02:28:18 -07001964 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 return rc;
1966}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001967EXPORT_SYMBOL(dev_queue_xmit);
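
/*
 * Editorial sketch: a minimal in-kernel sender.  The caller fills in
 * skb->dev (and normally the link-layer header) beforehand; whatever the
 * outcome, dev_queue_xmit() consumes the skb, so it must not be reused.
 */
static int example_send(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	return dev_queue_xmit(skb);	/* negative errno or NET_XMIT_* */
}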
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
1969
1970/*=======================================================================
1971 Receiver routines
1972 =======================================================================*/
1973
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001974int netdev_max_backlog __read_mostly = 1000;
1975int netdev_budget __read_mostly = 300;
1976int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
1978DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1979
1980
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981/**
1982 * netif_rx - post buffer to the network code
1983 * @skb: buffer to post
1984 *
1985 * This function receives a packet from a device driver and queues it for
1986 * the upper (protocol) levels to process. It always succeeds. The buffer
1987 * may be dropped during processing for congestion control or by the
1988 * protocol layers.
1989 *
1990 * return values:
1991 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 * NET_RX_DROP (packet was dropped)
1993 *
1994 */
1995
1996int netif_rx(struct sk_buff *skb)
1997{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 struct softnet_data *queue;
1999 unsigned long flags;
2000
2001 /* if netpoll wants it, pretend we never saw it */
2002 if (netpoll_rx(skb))
2003 return NET_RX_DROP;
2004
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002005 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002006 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
2008 /*
2009 * The code is rearranged so that the path is shortest
2010 * when the CPU is congested but still operating.
2011 */
2012 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 queue = &__get_cpu_var(softnet_data);
2014
2015 __get_cpu_var(netdev_rx_stat).total++;
2016 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2017 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002021 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 }
2023
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002024 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 goto enqueue;
2026 }
2027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 __get_cpu_var(netdev_rx_stat).dropped++;
2029 local_irq_restore(flags);
2030
2031 kfree_skb(skb);
2032 return NET_RX_DROP;
2033}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002034EXPORT_SYMBOL(netif_rx);
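
/*
 * Editorial sketch: the classic non-NAPI receive path of a hypothetical
 * driver interrupt handler - allocate, copy the frame in, classify, then
 * hand off with netif_rx().
 */
static void example_rx(struct net_device *dev, const void *data, int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}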
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036int netif_rx_ni(struct sk_buff *skb)
2037{
2038 int err;
2039
2040 preempt_disable();
2041 err = netif_rx(skb);
2042 if (local_softirq_pending())
2043 do_softirq();
2044 preempt_enable();
2045
2046 return err;
2047}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048EXPORT_SYMBOL(netif_rx_ni);
2049
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050static void net_tx_action(struct softirq_action *h)
2051{
2052 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2053
2054 if (sd->completion_queue) {
2055 struct sk_buff *clist;
2056
2057 local_irq_disable();
2058 clist = sd->completion_queue;
2059 sd->completion_queue = NULL;
2060 local_irq_enable();
2061
2062 while (clist) {
2063 struct sk_buff *skb = clist;
2064 clist = clist->next;
2065
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002066 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 __kfree_skb(skb);
2068 }
2069 }
2070
2071 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002072 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
2074 local_irq_disable();
2075 head = sd->output_queue;
2076 sd->output_queue = NULL;
2077 local_irq_enable();
2078
2079 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002080 struct Qdisc *q = head;
2081 spinlock_t *root_lock;
2082
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 head = head->next_sched;
2084
David S. Miller5fb66222008-08-02 20:02:43 -07002085 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002086 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002087 smp_mb__before_clear_bit();
2088 clear_bit(__QDISC_STATE_SCHED,
2089 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002090 qdisc_run(q);
2091 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002093 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002094 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002095 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002096 } else {
2097 smp_mb__before_clear_bit();
2098 clear_bit(__QDISC_STATE_SCHED,
2099 &q->state);
2100 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 }
2102 }
2103 }
2104}
2105
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002106static inline int deliver_skb(struct sk_buff *skb,
2107 struct packet_type *pt_prev,
2108 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
2110 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002111 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112}
2113
2114#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002115
2116#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2117/* This hook is defined here for ATM LANE */
2118int (*br_fdb_test_addr_hook)(struct net_device *dev,
2119 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002120EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002121#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
Stephen Hemminger6229e362007-03-21 13:38:47 -07002123/*
2124 * If bridge module is loaded call bridging hook.
2125 * returns NULL if packet was consumed.
2126 */
2127struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2128 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002129EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002130
Stephen Hemminger6229e362007-03-21 13:38:47 -07002131static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2132 struct packet_type **pt_prev, int *ret,
2133 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134{
2135 struct net_bridge_port *port;
2136
Stephen Hemminger6229e362007-03-21 13:38:47 -07002137 if (skb->pkt_type == PACKET_LOOPBACK ||
2138 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2139 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002142 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002144 }
2145
Stephen Hemminger6229e362007-03-21 13:38:47 -07002146 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147}
2148#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002149#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150#endif
2151
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002152#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2153struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2154EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2155
2156static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2157 struct packet_type **pt_prev,
2158 int *ret,
2159 struct net_device *orig_dev)
2160{
2161 if (skb->dev->macvlan_port == NULL)
2162 return skb;
2163
2164 if (*pt_prev) {
2165 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2166 *pt_prev = NULL;
2167 }
2168 return macvlan_handle_frame_hook(skb);
2169}
2170#else
2171#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2172#endif
2173
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174#ifdef CONFIG_NET_CLS_ACT
2175/* TODO: Maybe we should just force sch_ingress to be compiled in
2176 * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
2177 * instructions (a compare and 2 extra stores) right now if we don't
2178 * have it on but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002179 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 * the ingress scheduler, you just can't add policies on ingress.
2181 *
2182 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002183static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002186 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002187 struct netdev_queue *rxq;
2188 int result = TC_ACT_OK;
2189 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002190
Herbert Xuf697c3e2007-10-14 00:38:47 -07002191 if (MAX_RED_LOOP < ttl++) {
2192 printk(KERN_WARNING
2193 "Redir loop detected Dropping packet (%d->%d)\n",
2194 skb->iif, dev->ifindex);
2195 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 }
2197
Herbert Xuf697c3e2007-10-14 00:38:47 -07002198 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2199 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2200
David S. Miller555353c2008-07-08 17:33:13 -07002201 rxq = &dev->rx_queue;
2202
David S. Miller83874002008-07-17 00:53:03 -07002203 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002204 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002205 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002206 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2207 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002208 spin_unlock(qdisc_lock(q));
2209 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002210
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 return result;
2212}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002213
2214static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2215 struct packet_type **pt_prev,
2216 int *ret, struct net_device *orig_dev)
2217{
David S. Miller8d50b532008-07-30 02:37:46 -07002218 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002219 goto out;
2220
2221 if (*pt_prev) {
2222 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2223 *pt_prev = NULL;
2224 } else {
2225 /* Huh? Why does turning on AF_PACKET affect this? */
2226 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2227 }
2228
2229 switch (ing_filter(skb)) {
2230 case TC_ACT_SHOT:
2231 case TC_ACT_STOLEN:
2232 kfree_skb(skb);
2233 return NULL;
2234 }
2235
2236out:
2237 skb->tc_verd = 0;
2238 return skb;
2239}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240#endif
2241
Patrick McHardybc1d0412008-07-14 22:49:30 -07002242/*
2243 * netif_nit_deliver - deliver received packets to network taps
2244 * @skb: buffer
2245 *
2246 * This function is used to deliver incoming packets to network
2247 * taps. It should be used when the normal netif_receive_skb path
2248 * is bypassed, for example because of VLAN acceleration.
2249 */
2250void netif_nit_deliver(struct sk_buff *skb)
2251{
2252 struct packet_type *ptype;
2253
2254 if (list_empty(&ptype_all))
2255 return;
2256
2257 skb_reset_network_header(skb);
2258 skb_reset_transport_header(skb);
2259 skb->mac_len = skb->network_header - skb->mac_header;
2260
2261 rcu_read_lock();
2262 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2263 if (!ptype->dev || ptype->dev == skb->dev)
2264 deliver_skb(skb, ptype, skb->dev);
2265 }
2266 rcu_read_unlock();
2267}
2268
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002269/**
2270 * netif_receive_skb - process receive buffer from network
2271 * @skb: buffer to process
2272 *
2273 * netif_receive_skb() is the main receive data processing function.
2274 * It always succeeds. The buffer may be dropped during processing
2275 * for congestion control or by the protocol layers.
2276 *
2277 * This function may only be called from softirq context and interrupts
2278 * should be enabled.
2279 *
2280 * Return values (usually ignored):
2281 * NET_RX_SUCCESS: no congestion
2282 * NET_RX_DROP: packet was dropped
2283 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284int netif_receive_skb(struct sk_buff *skb)
2285{
2286 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002287 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002288 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002290 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002292 if (!skb->tstamp.tv64)
2293 net_timestamp(skb);
2294
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002295 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2296 return NET_RX_SUCCESS;
2297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002299 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 return NET_RX_DROP;
2301
Patrick McHardyc01003c2007-03-29 11:46:52 -07002302 if (!skb->iif)
2303 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002304
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002305 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002306 orig_dev = skb->dev;
2307 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002308 if (skb_bond_should_drop(skb))
2309 null_or_orig = orig_dev; /* deliver only exact match */
2310 else
2311 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002312 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 __get_cpu_var(netdev_rx_stat).total++;
2315
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002316 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002317 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002318 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
2320 pt_prev = NULL;
2321
2322 rcu_read_lock();
2323
2324#ifdef CONFIG_NET_CLS_ACT
2325 if (skb->tc_verd & TC_NCLS) {
2326 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2327 goto ncls;
2328 }
2329#endif
2330
2331 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002332 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2333 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002334 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002335 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 pt_prev = ptype;
2337 }
2338 }
2339
2340#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002341 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2342 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344ncls:
2345#endif
2346
Stephen Hemminger6229e362007-03-21 13:38:47 -07002347 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2348 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002350 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2351 if (!skb)
2352 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
2354 type = skb->protocol;
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08002355 list_for_each_entry_rcu(ptype,
2356 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002358 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2359 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002360 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002361 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 pt_prev = ptype;
2363 }
2364 }
2365
2366 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002367 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 } else {
2369 kfree_skb(skb);
2370 /* Jamal, now you will not be able to escape explaining
2371 * to me how you were going to use this. :-)
2372 */
2373 ret = NET_RX_DROP;
2374 }
2375
2376out:
2377 rcu_read_unlock();
2378 return ret;
2379}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002380EXPORT_SYMBOL(netif_receive_skb);
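
/*
 * Editorial sketch: a NAPI driver delivers from its poll routine, where
 * softirq context and enabled interrupts satisfy the requirements above.
 * example_next_rx_skb() is a stand-in for real rx-ring access.
 */
static struct sk_buff *example_next_rx_skb(struct napi_struct *napi)
{
	return NULL;	/* a real driver reads its rx ring here */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = example_next_rx_skb(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget)
		napi_complete(napi);	/* ring drained; re-arm the IRQ */
	return work;
}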
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002382/* Network device is going away, flush any packets still pending */
2383static void flush_backlog(void *arg)
2384{
2385 struct net_device *dev = arg;
2386 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2387 struct sk_buff *skb, *tmp;
2388
2389 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2390 if (skb->dev == dev) {
2391 __skb_unlink(skb, &queue->input_pkt_queue);
2392 kfree_skb(skb);
2393 }
2394}
2395
Herbert Xud565b0a2008-12-15 23:38:52 -08002396static int napi_gro_complete(struct sk_buff *skb)
2397{
2398 struct packet_type *ptype;
2399 __be16 type = skb->protocol;
2400 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2401 int err = -ENOENT;
2402
Herbert Xufc59f9a2009-04-14 15:11:06 -07002403 if (NAPI_GRO_CB(skb)->count == 1) {
2404 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002405 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002406 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002407
2408 rcu_read_lock();
2409 list_for_each_entry_rcu(ptype, head, list) {
2410 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2411 continue;
2412
2413 err = ptype->gro_complete(skb);
2414 break;
2415 }
2416 rcu_read_unlock();
2417
2418 if (err) {
2419 WARN_ON(&ptype->list == head);
2420 kfree_skb(skb);
2421 return NET_RX_SUCCESS;
2422 }
2423
2424out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002425 return netif_receive_skb(skb);
2426}
2427
2428void napi_gro_flush(struct napi_struct *napi)
2429{
2430 struct sk_buff *skb, *next;
2431
2432 for (skb = napi->gro_list; skb; skb = next) {
2433 next = skb->next;
2434 skb->next = NULL;
2435 napi_gro_complete(skb);
2436 }
2437
Herbert Xu4ae55442009-02-08 18:00:36 +00002438 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002439 napi->gro_list = NULL;
2440}
2441EXPORT_SYMBOL(napi_gro_flush);
2442
Herbert Xu96e93ea2009-01-06 10:49:34 -08002443int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002444{
2445 struct sk_buff **pp = NULL;
2446 struct packet_type *ptype;
2447 __be16 type = skb->protocol;
2448 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd2008-12-26 14:57:42 -08002449 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002450 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002451 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002452
2453 if (!(skb->dev->features & NETIF_F_GRO))
2454 goto normal;
2455
David S. Miller4cf704f2009-06-09 00:18:51 -07002456 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002457 goto normal;
2458
Herbert Xud565b0a2008-12-15 23:38:52 -08002459 rcu_read_lock();
2460 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002461 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2462 continue;
2463
Herbert Xu86911732009-01-29 14:19:50 +00002464 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002465 mac_len = skb->network_header - skb->mac_header;
2466 skb->mac_len = mac_len;
2467 NAPI_GRO_CB(skb)->same_flow = 0;
2468 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002469 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002470
Herbert Xud565b0a2008-12-15 23:38:52 -08002471 pp = ptype->gro_receive(&napi->gro_list, skb);
2472 break;
2473 }
2474 rcu_read_unlock();
2475
2476 if (&ptype->list == head)
2477 goto normal;
2478
Herbert Xu0da2afd2008-12-26 14:57:42 -08002479 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002480 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd2008-12-26 14:57:42 -08002481
Herbert Xud565b0a2008-12-15 23:38:52 -08002482 if (pp) {
2483 struct sk_buff *nskb = *pp;
2484
2485 *pp = nskb->next;
2486 nskb->next = NULL;
2487 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002488 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002489 }
2490
Herbert Xu0da2afd2008-12-26 14:57:42 -08002491 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002492 goto ok;
2493
Herbert Xu4ae55442009-02-08 18:00:36 +00002494 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002495 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002496
Herbert Xu4ae55442009-02-08 18:00:36 +00002497 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002498 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002499 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002500 skb->next = napi->gro_list;
2501 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002502 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002503
Herbert Xuad0f9902009-02-01 01:24:55 -08002504pull:
Herbert Xucb189782009-05-26 18:50:31 +00002505 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2506 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2507
2508 BUG_ON(skb->end - skb->tail < grow);
2509
2510 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2511
2512 skb->tail += grow;
2513 skb->data_len -= grow;
2514
2515 skb_shinfo(skb)->frags[0].page_offset += grow;
2516 skb_shinfo(skb)->frags[0].size -= grow;
2517
2518 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2519 put_page(skb_shinfo(skb)->frags[0].page);
2520 memmove(skb_shinfo(skb)->frags,
2521 skb_shinfo(skb)->frags + 1,
2522 --skb_shinfo(skb)->nr_frags);
2523 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002524 }
2525
Herbert Xud565b0a2008-12-15 23:38:52 -08002526ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002527 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002528
2529normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002530 ret = GRO_NORMAL;
2531 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002532}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002533EXPORT_SYMBOL(dev_gro_receive);
2534
2535static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2536{
2537 struct sk_buff *p;
2538
Herbert Xud1c76af2009-03-16 10:50:02 -07002539 if (netpoll_rx_on(skb))
2540 return GRO_NORMAL;
2541
Herbert Xu96e93ea2009-01-06 10:49:34 -08002542 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002543 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2544 && !compare_ether_header(skb_mac_header(p),
2545 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002546 NAPI_GRO_CB(p)->flush = 0;
2547 }
2548
2549 return dev_gro_receive(napi, skb);
2550}
Herbert Xu5d38a072009-01-04 16:13:40 -08002551
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002552int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002553{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002554 int err = NET_RX_SUCCESS;
2555
2556 switch (ret) {
2557 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002558 return netif_receive_skb(skb);
2559
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002560 case GRO_DROP:
2561 err = NET_RX_DROP;
2562 /* fall through */
2563
2564 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002565 kfree_skb(skb);
2566 break;
2567 }
2568
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002569 return err;
2570}
2571EXPORT_SYMBOL(napi_skb_finish);
2572
Herbert Xu78a478d2009-05-26 18:50:21 +00002573void skb_gro_reset_offset(struct sk_buff *skb)
2574{
2575 NAPI_GRO_CB(skb)->data_offset = 0;
2576 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002577 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002578
Herbert Xu78d3fd02009-05-26 18:50:23 +00002579 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002580 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002581 NAPI_GRO_CB(skb)->frag0 =
2582 page_address(skb_shinfo(skb)->frags[0].page) +
2583 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002584 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2585 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002586}
2587EXPORT_SYMBOL(skb_gro_reset_offset);
2588
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002589int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2590{
Herbert Xu86911732009-01-29 14:19:50 +00002591 skb_gro_reset_offset(skb);
2592
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002593 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002594}
2595EXPORT_SYMBOL(napi_gro_receive);
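
/*
 * Editorial sketch: with GRO, per-packet delivery from a poll routine
 * goes through napi_gro_receive() instead of netif_receive_skb(); any
 * skbs still held on napi->gro_list are flushed when the driver calls
 * napi_complete().
 */
static void example_gro_deliver(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb);	/* may merge, hold or pass skb up */
}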
2596
Herbert Xu96e93ea2009-01-06 10:49:34 -08002597void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2598{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002599 __skb_pull(skb, skb_headlen(skb));
2600 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2601
2602 napi->skb = skb;
2603}
2604EXPORT_SYMBOL(napi_reuse_skb);
2605
Herbert Xu76620aa2009-04-16 02:02:07 -07002606struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002607{
2608 struct net_device *dev = napi->dev;
2609 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002610
2611 if (!skb) {
2612 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2613 if (!skb)
2614 goto out;
2615
2616 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002617
2618 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002619 }
2620
Herbert Xu96e93ea2009-01-06 10:49:34 -08002621out:
2622 return skb;
2623}
Herbert Xu76620aa2009-04-16 02:02:07 -07002624EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002625
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002626int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2627{
2628 int err = NET_RX_SUCCESS;
2629
2630 switch (ret) {
2631 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002632 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002633 skb->protocol = eth_type_trans(skb, napi->dev);
2634
2635 if (ret == GRO_NORMAL)
2636 return netif_receive_skb(skb);
2637
2638 skb_gro_pull(skb, -ETH_HLEN);
2639 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002640
2641 case GRO_DROP:
2642 err = NET_RX_DROP;
2643 /* fall through */
2644
2645 case GRO_MERGED_FREE:
2646 napi_reuse_skb(napi, skb);
2647 break;
2648 }
2649
2650 return err;
2651}
2652EXPORT_SYMBOL(napi_frags_finish);
2653
struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}
EXPORT_SYMBOL(napi_frags_skb);

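/*
 * Entry point for drivers doing frag-based receive: pairs with
 * napi_get_frags().  The driver fills the cached skb's fragments and
 * calls this to push the packet through GRO without copying the
 * headers into linear data up front.
 */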
int napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return NET_RX_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

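/*
 * NAPI poll handler for the legacy per-CPU backlog queue fed by
 * netif_rx().  Packets are dequeued with interrupts disabled (the
 * queue is filled from interrupt context) and handed to
 * netif_receive_skb() until the quota is spent, the queue drains, or
 * a jiffy boundary passes.
 */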
static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			local_irq_enable();
			break;
		}
		local_irq_enable();

		netif_receive_skb(skb);
	} while (++work < quota && jiffies == start_time);

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

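/*
 * Core of NAPI completion: remove the instance from the poll list and
 * clear NAPI_STATE_SCHED.  The caller must have interrupts disabled
 * and must guarantee the GRO list has already been flushed.
 */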
void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

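/*
 * Driver-facing completion: flush any packets still held by GRO and
 * then retire the NAPI instance.  Skipped while netpoll is servicing
 * the device, since netpoll owns the instance in that window.
 */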
void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

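/**
 * netif_napi_add - initialize a NAPI context
 * @dev: network device the context belongs to
 * @napi: context to initialize
 * @poll: polling function invoked from softirq context
 * @weight: maximum number of packets @poll may process per call
 *
 * Registers @napi on @dev's NAPI list.  The instance starts in the
 * SCHED state, so the driver must call napi_enable() before the first
 * interrupt can schedule it.
 *
 * Typical setup in a driver's probe path (the names are illustrative):
 *
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
 *	...
 *	napi_enable(&priv->napi);
 */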
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);

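/*
 * Tear down a NAPI context: unlink it from the device, release the
 * cached frag skb and free any packets still queued on the GRO list.
 */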
void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

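/*
 * NET_RX_SOFTIRQ handler: walk this CPU's NAPI poll list, invoking
 * each instance's ->poll() until either the global budget or the
 * two-jiffy time slice is spent, then reschedule the softirq for
 * whatever is left over.
 */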
static void net_rx_action(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which gives an
		 * average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_entry(list->next, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, list);
		}

		netpoll_poll_unlock(have);
	}
out:
	local_irq_enable();

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t *gifconf_list[NPROTO];

/**
 * register_gifconf - register a SIOCGIF handler
 * @family: Address family
 * @gifconf: Function handler
 *
 * Register protocol dependent address dumping routines.  The handler
 * that is passed must not be freed or reused until it has been replaced
 * by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);

/*
 * Map an interface index to its name (SIOCGIFNAME)
 */

/*
 * We need this ioctl for efficient implementation of the
 * if_indextoname() function required by the IPv6 API.  Without
 * it, we would have to search all the interfaces to find a
 * match.  --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 * Perform a SIOCGIFCONF call.  This structure will change
 * size eventually, and there is nothing I can do about it.
 * Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 * Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 * All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS
/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(dev_base_lock)
{
	struct net *net = seq_file_net(seq);
	loff_t off;
	struct net_device *dev;

	read_lock(&dev_base_lock);
	if (!*pos)
		return SEQ_START_TOKEN;

	off = 1;
	for_each_netdev(net, dev)
		if (off++ == *pos)
			return dev;

	return NULL;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	++*pos;
	return v == SEQ_START_TOKEN ?
		first_net_device(net) : next_net_device((struct net_device *)v);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(dev_base_lock)
{
	read_unlock(&dev_base_lock);
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module.  This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = softnet_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ptype_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif /* CONFIG_PROC_FS */

/**
 * netdev_set_master - set up master/slave pair
 * @slave: slave device
 * @master: new master device
 *
 * Changes the master device of the slave.  Pass %NULL to break the
 * bonding.  The caller must hold the RTNL semaphore.  On a failure
 * a negative errno code is returned.  On success the reference counts
 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 * function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	uid_t uid;
	gid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			printk(KERN_WARNING "%s: promiscuity touches roof, "
				"set promiscuity failed, promiscuity feature "
				"of device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				uid, gid,
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device.  While the count in the device
 * remains above zero the interface remains promiscuous.  Once it hits zero
 * the device reverts back to normal filtering operation.  A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device.  While the
 * count in the device remains above zero the interface keeps receiving
 * all multicast frames.  Once it hits zero the device reverts back to
 * normal filtering operation.  A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti touches roof, "
				"set allmulti failed, allmulti feature of "
				"device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering.  When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
	else {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (dev->uc.count > 0 && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (dev->uc.count == 0 && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (ops->ndo_set_multicast_list)
			ops->ndo_set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/* hw addresses list handling functions */

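/*
 * Add an address to a hw address list, reference-counting duplicates:
 * if an entry with the same address and type already exists, only its
 * refcount is bumped.  New entries are allocated at no less than
 * L1_CACHE_BYTES and published with RCU list insertion.
 */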
static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
			 int addr_len, unsigned char addr_type)
{
	struct netdev_hw_addr *ha;
	int alloc_size;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	list_for_each_entry(ha, &list->list, list) {
		if (!memcmp(ha->addr, addr, addr_len) &&
		    ha->type == addr_type) {
			ha->refcount++;
			return 0;
		}
	}

	alloc_size = sizeof(*ha);
	if (alloc_size < L1_CACHE_BYTES)
		alloc_size = L1_CACHE_BYTES;
	ha = kmalloc(alloc_size, GFP_ATOMIC);
	if (!ha)
		return -ENOMEM;
	memcpy(ha->addr, addr, addr_len);
	ha->type = addr_type;
	ha->refcount = 1;
	ha->synced = false;
	list_add_tail_rcu(&ha->list, &list->list);
	list->count++;
	return 0;
}

static void ha_rcu_free(struct rcu_head *head)
{
	struct netdev_hw_addr *ha;

	ha = container_of(head, struct netdev_hw_addr, rcu_head);
	kfree(ha);
}

static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
			 int addr_len, unsigned char addr_type)
{
	struct netdev_hw_addr *ha;

	list_for_each_entry(ha, &list->list, list) {
		if (!memcmp(ha->addr, addr, addr_len) &&
		    (ha->type == addr_type || !addr_type)) {
			if (--ha->refcount)
				return 0;
			list_del_rcu(&ha->list);
			call_rcu(&ha->rcu_head, ha_rcu_free);
			list->count--;
			return 0;
		}
	}
	return -ENOENT;
}

static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len,
				  unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha, *ha2;
	unsigned char type;

	list_for_each_entry(ha, &from_list->list, list) {
		type = addr_type ? addr_type : ha->type;
		err = __hw_addr_add(to_list, ha->addr, addr_len, type);
		if (err)
			goto unroll;
	}
	return 0;

unroll:
	list_for_each_entry(ha2, &from_list->list, list) {
		if (ha2 == ha)
			break;
		type = addr_type ? addr_type : ha2->type;
		__hw_addr_del(to_list, ha2->addr, addr_len, type);
	}
	return err;
}

static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len,
				   unsigned char addr_type)
{
	struct netdev_hw_addr *ha;
	unsigned char type;

	list_for_each_entry(ha, &from_list->list, list) {
		type = addr_type ? addr_type : ha->type;
		/* use the resolved type, not the raw addr_type, so that
		 * entries added under their own type are matched */
		__hw_addr_del(to_list, ha->addr, addr_len, type);
	}
}

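/*
 * Mirror new entries from from_list into to_list.  A newly synced
 * entry gets an extra reference on the source side so it survives
 * until __hw_addr_unsync() runs; an entry whose source refcount has
 * dropped back to 1 (only the sync reference left) is removed from
 * both lists.
 */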
static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->synced) {
			err = __hw_addr_add(to_list, ha->addr,
					    addr_len, ha->type);
			if (err)
				break;
			ha->synced = true;
			ha->refcount++;
		} else if (ha->refcount == 1) {
			__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
			__hw_addr_del(from_list, ha->addr, addr_len, ha->type);
		}
	}
	return err;
}

static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len)
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->synced) {
			__hw_addr_del(to_list, ha->addr,
				      addr_len, ha->type);
			ha->synced = false;
			__hw_addr_del(from_list, ha->addr,
				      addr_len, ha->type);
		}
	}
}

static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		list_del_rcu(&ha->list);
		call_rcu(&ha->rcu_head, ha_rcu_free);
	}
	list->count = 0;
}

static void __hw_addr_init(struct netdev_hw_addr_list *list)
{
	INIT_LIST_HEAD(&list->list);
	list->count = 0;
}

/* Device addresses handling functions */

static void dev_addr_flush(struct net_device *dev)
{
	/* rtnl_mutex must be held here */

	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}

static int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	/* rtnl_mutex must be held here */

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/*
		 * Get the first (previously created) address from the list
		 * and set dev_addr pointer to this location.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}

/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count if
 * it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add(struct net_device *dev, unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);

/**
 * dev_addr_del - Release a device address.
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release reference to a device address and remove it from the device
 * if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del(struct net_device *dev, unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/*
	 * We cannot remove the first address from the list because
	 * dev->dev_addr points to that.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (ha->addr == dev->dev_addr && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);

/**
 * dev_addr_add_multiple - Add device addresses from another device
 * @to_dev: device to which addresses will be added
 * @from_dev: device from which addresses will be added
 * @addr_type: address type - 0 means type will be used from from_dev
 *
 * Add device addresses of one device to another.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add_multiple(struct net_device *to_dev,
			  struct net_device *from_dev,
			  unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	if (from_dev->addr_len != to_dev->addr_len)
		return -EINVAL;
	err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
				     to_dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add_multiple);

/**
 * dev_addr_del_multiple - Delete device addresses by another device
 * @to_dev: device where the addresses will be deleted
 * @from_dev: device supplying the addresses to be deleted
 * @addr_type: address type - 0 means type will be used from from_dev
 *
 * Deletes addresses in the to device by the list of addresses in the
 * from device.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del_multiple(struct net_device *to_dev,
			  struct net_device *from_dev,
			  unsigned char addr_type)
{
	ASSERT_RTNL();

	if (from_dev->addr_len != to_dev->addr_len)
		return -EINVAL;
	__hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
			       to_dev->addr_len, addr_type);
	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
	return 0;
}
EXPORT_SYMBOL(dev_addr_del_multiple);

/* multicast addresses handling functions */

int __dev_addr_delete(struct dev_addr_list **list, int *count,
		      void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (; (da = *list) != NULL; list = &da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    alen == da->da_addrlen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 0;
				if (old_glbl == 0)
					break;
			}
			if (--da->da_users)
				return 0;

			*list = da->next;
			kfree(da);
			(*count)--;
			return 0;
		}
	}
	return -ENOENT;
}

int __dev_addr_add(struct dev_addr_list **list, int *count,
		   void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (da = *list; da != NULL; da = da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    da->da_addrlen == alen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 1;
				if (old_glbl)
					return 0;
			}
			da->da_users++;
			return 0;
		}
	}

	da = kzalloc(sizeof(*da), GFP_ATOMIC);
	if (da == NULL)
		return -ENOMEM;
	memcpy(da->da_addr, addr, alen);
	da->da_addrlen = alen;
	da->da_users = 1;
	da->da_gusers = glbl ? 1 : 0;
	da->next = *list;
	*list = da;
	(*count)++;
	return 0;
}

/**
 * dev_unicast_delete - Release secondary unicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a secondary unicast address and remove it
 * from the device if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_unicast_delete(struct net_device *dev, void *addr)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_delete);

/**
 * dev_unicast_add - add a secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a secondary unicast address to the device or increase
 * the reference count if it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_unicast_add(struct net_device *dev, void *addr)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_add);

int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
		    struct dev_addr_list **from, int *from_count)
{
	struct dev_addr_list *da, *next;
	int err = 0;

	da = *from;
	while (da != NULL) {
		next = da->next;
		if (!da->da_synced) {
			err = __dev_addr_add(to, to_count,
					     da->da_addr, da->da_addrlen, 0);
			if (err < 0)
				break;
			da->da_synced = 1;
			da->da_users++;
		} else if (da->da_users == 1) {
			__dev_addr_delete(to, to_count,
					  da->da_addr, da->da_addrlen, 0);
			__dev_addr_delete(from, from_count,
					  da->da_addr, da->da_addrlen, 0);
		}
		da = next;
	}
	return err;
}
EXPORT_SYMBOL_GPL(__dev_addr_sync);

void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
		       struct dev_addr_list **from, int *from_count)
{
	struct dev_addr_list *da, *next;

	da = *from;
	while (da != NULL) {
		next = da->next;
		if (da->da_synced) {
			__dev_addr_delete(to, to_count,
					  da->da_addr, da->da_addrlen, 0);
			da->da_synced = 0;
			__dev_addr_delete(from, from_count,
					  da->da_addr, da->da_addrlen, 0);
		}
		da = next;
	}
}
EXPORT_SYMBOL_GPL(__dev_addr_unsync);

/**
 * dev_unicast_sync - Synchronize device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left.  The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices.
 */
int dev_unicast_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_bh(to);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock_bh(to);
	return err;
}
EXPORT_SYMBOL(dev_unicast_sync);

/**
 * dev_unicast_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_unicast_sync().  This function is intended to be called from the
 * dev->stop function of layered software devices.
 */
void dev_unicast_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_unicast_unsync);

static void dev_unicast_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->uc);
	netif_addr_unlock_bh(dev);
}

static void dev_unicast_init(struct net_device *dev)
{
	__hw_addr_init(&dev->uc);
}

static void __dev_addr_discard(struct dev_addr_list **list)
{
	struct dev_addr_list *tmp;

	while (*list != NULL) {
		tmp = *list;
		*list = tmp->next;
		if (tmp->da_users > tmp->da_gusers)
			printk("__dev_addr_discard: address leakage! "
			       "da_users=%d\n", tmp->da_users);
		kfree(tmp);
	}
}

static void dev_addr_discard(struct net_device *dev)
{
	netif_addr_lock_bh(dev);

	__dev_addr_discard(&dev->mc_list);
	dev->mc_count = 0;

	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004117/**
4118 * dev_change_flags - change device settings
4119 * @dev: device
4120 * @flags: device state flags
4121 *
4122 * Change settings on a device based on state flags. The flags are
4123 * in the userspace exported format.
4124 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125int dev_change_flags(struct net_device *dev, unsigned flags)
4126{
Thomas Graf7c355f52007-06-05 16:03:03 -07004127 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 int old_flags = dev->flags;
4129
Patrick McHardy24023452007-07-14 18:51:31 -07004130 ASSERT_RTNL();
4131
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 /*
4133 * Set the flags on our device.
4134 */
4135
4136 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4137 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4138 IFF_AUTOMEDIA)) |
4139 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4140 IFF_ALLMULTI));
4141
4142 /*
4143 * Load in the correct multicast list now the flags have changed.
4144 */
4145
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004146 if ((old_flags ^ flags) & IFF_MULTICAST)
4147 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004148
Patrick McHardy4417da62007-06-27 01:28:10 -07004149 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150
4151 /*
4152 * Have we downed the interface? We handle IFF_UP ourselves
4153 * according to user attempts to set it, rather than blindly
4154 * setting it.
4155 */
4156
4157 ret = 0;
4158 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4159 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4160
4161 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004162 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 }
4164
4165 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004166 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004168 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169
4170 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004171 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4172
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173 dev->gflags ^= IFF_PROMISC;
4174 dev_set_promiscuity(dev, inc);
4175 }
4176
4177 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4178 is important. Some (broken) drivers set IFF_PROMISC when
4179 IFF_ALLMULTI is requested, without asking us and without reporting it.
4180 */
4181 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004182 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4183
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184 dev->gflags ^= IFF_ALLMULTI;
4185 dev_set_allmulti(dev, inc);
4186 }
4187
Thomas Graf7c355f52007-06-05 16:03:03 -07004188 /* Exclude state transition flags, already notified */
4189 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4190 if (changes)
4191 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192
4193 return ret;
4194}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004195EXPORT_SYMBOL(dev_change_flags);
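
/*
 * Example: a minimal sketch of toggling a flag via dev_change_flags().
 * The RTNL semaphore must be held, as asserted above. The helper name is
 * hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static int example_enable_promisc(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_PROMISC);
	rtnl_unlock();
	return err;
}
#endif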
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004197/**
4198 * dev_set_mtu - Change maximum transfer unit
4199 * @dev: device
4200 * @new_mtu: new transfer unit
4201 *
4202 * Change the maximum transfer size of the network device.
4203 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204int dev_set_mtu(struct net_device *dev, int new_mtu)
4205{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004206 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207 int err;
4208
4209 if (new_mtu == dev->mtu)
4210 return 0;
4211
4212 /* MTU must not be negative. */
4213 if (new_mtu < 0)
4214 return -EINVAL;
4215
4216 if (!netif_device_present(dev))
4217 return -ENODEV;
4218
4219 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004220 if (ops->ndo_change_mtu)
4221 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 else
4223 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004224
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004226 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227 return err;
4228}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004229EXPORT_SYMBOL(dev_set_mtu);
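
/*
 * Example: a minimal sketch of a tunnel-style driver shrinking a lower
 * device's MTU to make room for a 4-byte encapsulation header. The helper
 * name and the constant are hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static int example_reserve_headroom(struct net_device *lower)
{
	ASSERT_RTNL();	/* dev_set_mtu() callers run under rtnl_lock() */
	return dev_set_mtu(lower, lower->mtu - 4);
}
#endif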
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004231/**
4232 * dev_set_mac_address - Change Media Access Control Address
4233 * @dev: device
4234 * @sa: new address
4235 *
4236 * Change the hardware (MAC) address of the device
4237 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4239{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004240 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241 int err;
4242
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004243 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 return -EOPNOTSUPP;
4245 if (sa->sa_family != dev->type)
4246 return -EINVAL;
4247 if (!netif_device_present(dev))
4248 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004249 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004251 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252 return err;
4253}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004254EXPORT_SYMBOL(dev_set_mac_address);
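
/*
 * Example: a minimal sketch of programming a new MAC address from a raw
 * byte array. As checked above, sa_family must match dev->type. The
 * helper name is hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
#endif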
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255
4256/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004257 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004259static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260{
4261 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004262 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263
4264 if (!dev)
4265 return -ENODEV;
4266
4267 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004268 case SIOCGIFFLAGS: /* Get interface flags */
4269 ifr->ifr_flags = (short) dev_get_flags(dev);
4270 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004271
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004272 case SIOCGIFMETRIC: /* Get the metric on the interface
4273 (currently unused) */
4274 ifr->ifr_metric = 0;
4275 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004277 case SIOCGIFMTU: /* Get the MTU of a device */
4278 ifr->ifr_mtu = dev->mtu;
4279 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004281 case SIOCGIFHWADDR:
4282 if (!dev->addr_len)
4283 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4284 else
4285 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4286 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4287 ifr->ifr_hwaddr.sa_family = dev->type;
4288 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004290 case SIOCGIFSLAVE:
4291 err = -EINVAL;
4292 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004293
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004294 case SIOCGIFMAP:
4295 ifr->ifr_map.mem_start = dev->mem_start;
4296 ifr->ifr_map.mem_end = dev->mem_end;
4297 ifr->ifr_map.base_addr = dev->base_addr;
4298 ifr->ifr_map.irq = dev->irq;
4299 ifr->ifr_map.dma = dev->dma;
4300 ifr->ifr_map.port = dev->if_port;
4301 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004302
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004303 case SIOCGIFINDEX:
4304 ifr->ifr_ifindex = dev->ifindex;
4305 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004306
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004307 case SIOCGIFTXQLEN:
4308 ifr->ifr_qlen = dev->tx_queue_len;
4309 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004310
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004311 default:
4312 /* dev_ioctl() should ensure this case
4313 * is never reached
4314 */
4315 WARN_ON(1);
4316 err = -EINVAL;
4317 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004318
4319 }
4320 return err;
4321}
4322
4323/*
4324 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4325 */
4326static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4327{
4328 int err;
4329 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004330 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004331
4332 if (!dev)
4333 return -ENODEV;
4334
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004335 ops = dev->netdev_ops;
4336
Jeff Garzik14e3e072007-10-08 00:06:32 -07004337 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004338 case SIOCSIFFLAGS: /* Set interface flags */
4339 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004340
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004341 case SIOCSIFMETRIC: /* Set the metric on the interface
4342 (currently unused) */
4343 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004344
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004345 case SIOCSIFMTU: /* Set the MTU of a device */
4346 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004347
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004348 case SIOCSIFHWADDR:
4349 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004351 case SIOCSIFHWBROADCAST:
4352 if (ifr->ifr_hwaddr.sa_family != dev->type)
4353 return -EINVAL;
4354 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4355 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4356 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4357 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004359 case SIOCSIFMAP:
4360 if (ops->ndo_set_config) {
4361 if (!netif_device_present(dev))
4362 return -ENODEV;
4363 return ops->ndo_set_config(dev, &ifr->ifr_map);
4364 }
4365 return -EOPNOTSUPP;
4366
4367 case SIOCADDMULTI:
4368 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4369 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4370 return -EINVAL;
4371 if (!netif_device_present(dev))
4372 return -ENODEV;
4373 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4374 dev->addr_len, 1);
4375
4376 case SIOCDELMULTI:
4377 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4378 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4379 return -EINVAL;
4380 if (!netif_device_present(dev))
4381 return -ENODEV;
4382 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4383 dev->addr_len, 1);
4384
4385 case SIOCSIFTXQLEN:
4386 if (ifr->ifr_qlen < 0)
4387 return -EINVAL;
4388 dev->tx_queue_len = ifr->ifr_qlen;
4389 return 0;
4390
4391 case SIOCSIFNAME:
4392 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4393 return dev_change_name(dev, ifr->ifr_newname);
4394
4395 /*
4396 * Unknown or private ioctl
4397 */
4398 default:
4399 if ((cmd >= SIOCDEVPRIVATE &&
4400 cmd <= SIOCDEVPRIVATE + 15) ||
4401 cmd == SIOCBONDENSLAVE ||
4402 cmd == SIOCBONDRELEASE ||
4403 cmd == SIOCBONDSETHWADDR ||
4404 cmd == SIOCBONDSLAVEINFOQUERY ||
4405 cmd == SIOCBONDINFOQUERY ||
4406 cmd == SIOCBONDCHANGEACTIVE ||
4407 cmd == SIOCGMIIPHY ||
4408 cmd == SIOCGMIIREG ||
4409 cmd == SIOCSMIIREG ||
4410 cmd == SIOCBRADDIF ||
4411 cmd == SIOCBRDELIF ||
4412 cmd == SIOCSHWTSTAMP ||
4413 cmd == SIOCWANDEV) {
4414 err = -EOPNOTSUPP;
4415 if (ops->ndo_do_ioctl) {
4416 if (netif_device_present(dev))
4417 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4418 else
4419 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004421 } else
4422 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423
4424 }
4425 return err;
4426}
4427
4428/*
4429 * This function handles all "interface"-type I/O control requests. The actual
4430 * 'doing' part of this is dev_ifsioc above.
4431 */
4432
4433/**
4434 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004435 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436 * @cmd: command to issue
4437 * @arg: pointer to a struct ifreq in user space
4438 *
4439 * Issue ioctl functions to devices. This is normally called by the
4440 * user space syscall interfaces but can sometimes be useful for
4441 * other purposes. The return value is the return from the syscall if
4442 * positive or a negative errno code on error.
4443 */
4444
Eric W. Biederman881d9662007-09-17 11:56:21 -07004445int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004446{
4447 struct ifreq ifr;
4448 int ret;
4449 char *colon;
4450
4451 /* One special case: SIOCGIFCONF takes ifconf argument
4452 and requires shared lock, because it sleeps writing
4453 to user space.
4454 */
4455
4456 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004457 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004458 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004459 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460 return ret;
4461 }
4462 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004463 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464
4465 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4466 return -EFAULT;
4467
4468 ifr.ifr_name[IFNAMSIZ-1] = 0;
4469
4470 colon = strchr(ifr.ifr_name, ':');
4471 if (colon)
4472 *colon = 0;
4473
4474 /*
4475 * See which interface the caller is talking about.
4476 */
4477
4478 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004479 /*
4480 * These ioctl calls:
4481 * - can be done by all.
4482 * - atomic and do not require locking.
4483 * - return a value
4484 */
4485 case SIOCGIFFLAGS:
4486 case SIOCGIFMETRIC:
4487 case SIOCGIFMTU:
4488 case SIOCGIFHWADDR:
4489 case SIOCGIFSLAVE:
4490 case SIOCGIFMAP:
4491 case SIOCGIFINDEX:
4492 case SIOCGIFTXQLEN:
4493 dev_load(net, ifr.ifr_name);
4494 read_lock(&dev_base_lock);
4495 ret = dev_ifsioc_locked(net, &ifr, cmd);
4496 read_unlock(&dev_base_lock);
4497 if (!ret) {
4498 if (colon)
4499 *colon = ':';
4500 if (copy_to_user(arg, &ifr,
4501 sizeof(struct ifreq)))
4502 ret = -EFAULT;
4503 }
4504 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004506 case SIOCETHTOOL:
4507 dev_load(net, ifr.ifr_name);
4508 rtnl_lock();
4509 ret = dev_ethtool(net, &ifr);
4510 rtnl_unlock();
4511 if (!ret) {
4512 if (colon)
4513 *colon = ':';
4514 if (copy_to_user(arg, &ifr,
4515 sizeof(struct ifreq)))
4516 ret = -EFAULT;
4517 }
4518 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004520 /*
4521 * These ioctl calls:
4522 * - require superuser power.
4523 * - require strict serialization.
4524 * - return a value
4525 */
4526 case SIOCGMIIPHY:
4527 case SIOCGMIIREG:
4528 case SIOCSIFNAME:
4529 if (!capable(CAP_NET_ADMIN))
4530 return -EPERM;
4531 dev_load(net, ifr.ifr_name);
4532 rtnl_lock();
4533 ret = dev_ifsioc(net, &ifr, cmd);
4534 rtnl_unlock();
4535 if (!ret) {
4536 if (colon)
4537 *colon = ':';
4538 if (copy_to_user(arg, &ifr,
4539 sizeof(struct ifreq)))
4540 ret = -EFAULT;
4541 }
4542 return ret;
4543
4544 /*
4545 * These ioctl calls:
4546 * - require superuser power.
4547 * - require strict serialization.
4548 * - do not return a value
4549 */
4550 case SIOCSIFFLAGS:
4551 case SIOCSIFMETRIC:
4552 case SIOCSIFMTU:
4553 case SIOCSIFMAP:
4554 case SIOCSIFHWADDR:
4555 case SIOCSIFSLAVE:
4556 case SIOCADDMULTI:
4557 case SIOCDELMULTI:
4558 case SIOCSIFHWBROADCAST:
4559 case SIOCSIFTXQLEN:
4560 case SIOCSMIIREG:
4561 case SIOCBONDENSLAVE:
4562 case SIOCBONDRELEASE:
4563 case SIOCBONDSETHWADDR:
4564 case SIOCBONDCHANGEACTIVE:
4565 case SIOCBRADDIF:
4566 case SIOCBRDELIF:
4567 case SIOCSHWTSTAMP:
4568 if (!capable(CAP_NET_ADMIN))
4569 return -EPERM;
4570 /* fall through */
4571 case SIOCBONDSLAVEINFOQUERY:
4572 case SIOCBONDINFOQUERY:
4573 dev_load(net, ifr.ifr_name);
4574 rtnl_lock();
4575 ret = dev_ifsioc(net, &ifr, cmd);
4576 rtnl_unlock();
4577 return ret;
4578
4579 case SIOCGIFMEM:
4580 /* Get the per device memory space. We can add this but
4581 * currently do not support it */
4582 case SIOCSIFMEM:
4583 /* Set the per device memory buffer space.
4584 * Not applicable in our case */
4585 case SIOCSIFLINK:
4586 return -EINVAL;
4587
4588 /*
4589 * Unknown or private ioctl.
4590 */
4591 default:
4592 if (cmd == SIOCWANDEV ||
4593 (cmd >= SIOCDEVPRIVATE &&
4594 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004595 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004597 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004599 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004600 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004601 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004603 }
4604 /* Take care of Wireless Extensions */
4605 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4606 return wext_handle_ioctl(net, &ifr, cmd, arg);
4607 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608 }
4609}
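
/*
 * Example: a minimal sketch of the userspace side of the SIOCGIFMTU path
 * handled above -- ordinary application code, shown only to illustrate
 * this kernel entry point.
 */
#if 0	/* userspace illustration, not kernel code */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int print_mtu(const char *ifname)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifname, ifr.ifr_mtu);
	close(fd);
	return 0;
}
#endif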
4610
4611
4612/**
4613 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004614 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615 *
4616 * Returns a suitable unique value for a new device interface
4617 * number. The caller must hold the rtnl semaphore or the
4618 * dev_base_lock to be sure it remains unique.
4619 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004620static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621{
4622 static int ifindex;
4623 for (;;) {
4624 if (++ifindex <= 0)
4625 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004626 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 return ifindex;
4628 }
4629}
4630
Linus Torvalds1da177e2005-04-16 15:20:36 -07004631/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004632static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004634static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637}
4638
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004639static void rollback_registered(struct net_device *dev)
4640{
4641 BUG_ON(dev_boot_phase);
4642 ASSERT_RTNL();
4643
4644 /* Some devices call this without ever registering, as an initialization unwind. */
4645 if (dev->reg_state == NETREG_UNINITIALIZED) {
4646 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4647 "was registered\n", dev->name, dev);
4648
4649 WARN_ON(1);
4650 return;
4651 }
4652
4653 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4654
4655 /* If device is running, close it first. */
4656 dev_close(dev);
4657
4658 /* And unlink it from device chain. */
4659 unlist_netdevice(dev);
4660
4661 dev->reg_state = NETREG_UNREGISTERING;
4662
4663 synchronize_net();
4664
4665 /* Shutdown queueing discipline. */
4666 dev_shutdown(dev);
4667
4668
4669 /* Notify protocols that we are about to destroy
4670 this device. They should clean all the things.
4671 */
4672 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4673
4674 /*
4675 * Flush the unicast and multicast chains
4676 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00004677 dev_unicast_flush(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004678 dev_addr_discard(dev);
4679
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004680 if (dev->netdev_ops->ndo_uninit)
4681 dev->netdev_ops->ndo_uninit(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004682
4683 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004684 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004685
4686 /* Remove entries from kobject tree */
4687 netdev_unregister_kobject(dev);
4688
4689 synchronize_net();
4690
4691 dev_put(dev);
4692}
4693
David S. Millere8a04642008-07-17 00:34:19 -07004694static void __netdev_init_queue_locks_one(struct net_device *dev,
4695 struct netdev_queue *dev_queue,
4696 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004697{
4698 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004699 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004700 dev_queue->xmit_lock_owner = -1;
4701}
4702
4703static void netdev_init_queue_locks(struct net_device *dev)
4704{
David S. Millere8a04642008-07-17 00:34:19 -07004705 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4706 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004707}
4708
Herbert Xub63365a2008-10-23 01:11:29 -07004709unsigned long netdev_fix_features(unsigned long features, const char *name)
4710{
4711 /* Fix illegal SG+CSUM combinations. */
4712 if ((features & NETIF_F_SG) &&
4713 !(features & NETIF_F_ALL_CSUM)) {
4714 if (name)
4715 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4716 "checksum feature.\n", name);
4717 features &= ~NETIF_F_SG;
4718 }
4719
4720 /* TSO requires that SG is present as well. */
4721 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4722 if (name)
4723 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4724 "SG feature.\n", name);
4725 features &= ~NETIF_F_TSO;
4726 }
4727
4728 if (features & NETIF_F_UFO) {
4729 if (!(features & NETIF_F_GEN_CSUM)) {
4730 if (name)
4731 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4732 "since no NETIF_F_HW_CSUM feature.\n",
4733 name);
4734 features &= ~NETIF_F_UFO;
4735 }
4736
4737 if (!(features & NETIF_F_SG)) {
4738 if (name)
4739 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4740 "since no NETIF_F_SG feature.\n", name);
4741 features &= ~NETIF_F_UFO;
4742 }
4743 }
4744
4745 return features;
4746}
4747EXPORT_SYMBOL(netdev_fix_features);
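
/*
 * Example: a minimal sketch of a driver sanitizing a candidate feature set
 * before publishing it, mirroring what register_netdevice() does below.
 * The helper name is hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static void example_apply_features(struct net_device *dev,
				   unsigned long wanted)
{
	dev->features = netdev_fix_features(wanted, dev->name);
}
#endif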
4748
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749/**
4750 * register_netdevice - register a network device
4751 * @dev: device to register
4752 *
4753 * Take a completed network device structure and add it to the kernel
4754 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4755 * chain. 0 is returned on success. A negative errno code is returned
4756 * on a failure to set up the device, or if the name is a duplicate.
4757 *
4758 * Callers must hold the rtnl semaphore. You may want
4759 * register_netdev() instead of this.
4760 *
4761 * BUGS:
4762 * The locking appears insufficient to guarantee two parallel registers
4763 * will not get the same name.
4764 */
4765
4766int register_netdevice(struct net_device *dev)
4767{
4768 struct hlist_head *head;
4769 struct hlist_node *p;
4770 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004771 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772
4773 BUG_ON(dev_boot_phase);
4774 ASSERT_RTNL();
4775
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004776 might_sleep();
4777
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778 /* When net_device's are persistent, this will be fatal. */
4779 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004780 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781
David S. Millerf1f28aa2008-07-15 00:08:33 -07004782 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004783 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004784 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786 dev->iflink = -1;
4787
4788 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004789 if (dev->netdev_ops->ndo_init) {
4790 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791 if (ret) {
4792 if (ret > 0)
4793 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004794 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795 }
4796 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004797
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798 if (!dev_valid_name(dev->name)) {
4799 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004800 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801 }
4802
Eric W. Biederman881d9662007-09-17 11:56:21 -07004803 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 if (dev->iflink == -1)
4805 dev->iflink = dev->ifindex;
4806
4807 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004808 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 hlist_for_each(p, head) {
4810 struct net_device *d
4811 = hlist_entry(p, struct net_device, name_hlist);
4812 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4813 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004814 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004817
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004818 /* Fix illegal checksum combinations */
4819 if ((dev->features & NETIF_F_HW_CSUM) &&
4820 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4821 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4822 dev->name);
4823 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4824 }
4825
4826 if ((dev->features & NETIF_F_NO_CSUM) &&
4827 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4828 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4829 dev->name);
4830 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4831 }
4832
Herbert Xub63365a2008-10-23 01:11:29 -07004833 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004835 /* Enable software GSO if SG is supported. */
4836 if (dev->features & NETIF_F_SG)
4837 dev->features |= NETIF_F_GSO;
4838
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004839 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004840 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004841 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004842 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004843 dev->reg_state = NETREG_REGISTERED;
4844
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845 /*
4846 * Default initial state at registry is that the
4847 * device is present.
4848 */
4849
4850 set_bit(__LINK_STATE_PRESENT, &dev->state);
4851
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004854 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004855
4856 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004857 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004858 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004859 if (ret) {
4860 rollback_registered(dev);
4861 dev->reg_state = NETREG_UNREGISTERED;
4862 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863
4864out:
4865 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004866
4867err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004868 if (dev->netdev_ops->ndo_uninit)
4869 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004870 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004872EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873
4874/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004875 * init_dummy_netdev - init a dummy network device for NAPI
4876 * @dev: device to init
4877 *
4878 * This takes a network device structure and initializes the minimum
4879 * number of fields so it can be used to schedule NAPI polls without
4880 * registering a full blown interface. This is to be used by drivers
4881 * that need to tie several hardware interfaces to a single NAPI
4882 * poll scheduler due to HW limitations.
4883 */
4884int init_dummy_netdev(struct net_device *dev)
4885{
4886 /* Clear everything. Note we don't initialize spinlocks
4887 * as they aren't supposed to be taken by any of the
4888 * NAPI code and this dummy netdev is supposed to be
4889 * only ever used for NAPI polls
4890 */
4891 memset(dev, 0, sizeof(struct net_device));
4892
4893 /* make sure we BUG if trying to hit standard
4894 * register/unregister code path
4895 */
4896 dev->reg_state = NETREG_DUMMY;
4897
4898 /* initialize the ref count */
4899 atomic_set(&dev->refcnt, 1);
4900
4901 /* NAPI wants this */
4902 INIT_LIST_HEAD(&dev->napi_list);
4903
4904 /* a dummy interface is started by default */
4905 set_bit(__LINK_STATE_PRESENT, &dev->state);
4906 set_bit(__LINK_STATE_START, &dev->state);
4907
4908 return 0;
4909}
4910EXPORT_SYMBOL_GPL(init_dummy_netdev);
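
/*
 * Example: a minimal sketch of the pattern this helper exists for: one
 * piece of hardware, several NAPI contexts hung off an embedded,
 * never-registered netdev. All names are hypothetical, not part of this
 * file; example_poll() stands in for the driver's real poll callback.
 */
#if 0	/* illustrative sketch only */
struct example_adapter {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget);

static void example_setup_napi(struct example_adapter *adapter)
{
	init_dummy_netdev(&adapter->napi_dev);
	netif_napi_add(&adapter->napi_dev, &adapter->napi,
		       example_poll, 64);
}
#endif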
4911
4912
4913/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004914 * register_netdev - register a network device
4915 * @dev: device to register
4916 *
4917 * Take a completed network device structure and add it to the kernel
4918 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4919 * chain. 0 is returned on success. A negative errno code is returned
4920 * on a failure to set up the device, or if the name is a duplicate.
4921 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004922 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923 * and expands the device name if you passed a format string to
4924 * alloc_netdev.
4925 */
4926int register_netdev(struct net_device *dev)
4927{
4928 int err;
4929
4930 rtnl_lock();
4931
4932 /*
4933 * If the name is a format string the caller wants us to do a
4934 * name allocation.
4935 */
4936 if (strchr(dev->name, '%')) {
4937 err = dev_alloc_name(dev, dev->name);
4938 if (err < 0)
4939 goto out;
4940 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004941
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942 err = register_netdevice(dev);
4943out:
4944 rtnl_unlock();
4945 return err;
4946}
4947EXPORT_SYMBOL(register_netdev);
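
/*
 * Example: a minimal sketch of the usual probe-time pattern. Passing
 * "foo%d" lets register_netdev() pick the first free name via
 * dev_alloc_name(), as described above. Names are hypothetical, not part
 * of this file.
 */
#if 0	/* illustrative sketch only */
static void example_setup(struct net_device *dev);

static struct net_device *example_probe(void)
{
	struct net_device *dev = alloc_netdev(0, "foo%d", example_setup);

	if (!dev)
		return NULL;
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}
#endif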
4948
4949/*
4950 * netdev_wait_allrefs - wait until all references are gone.
4951 *
4952 * This is called when unregistering network devices.
4953 *
4954 * Any protocol or device that holds a reference should register
4955 * for netdevice notification, and clean up and put back the
4956 * reference if they receive an UNREGISTER event.
4957 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004958 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 */
4960static void netdev_wait_allrefs(struct net_device *dev)
4961{
4962 unsigned long rebroadcast_time, warning_time;
4963
4964 rebroadcast_time = warning_time = jiffies;
4965 while (atomic_read(&dev->refcnt) != 0) {
4966 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004967 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968
4969 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004970 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971
4972 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4973 &dev->state)) {
4974 /* We must not have linkwatch events
4975 * pending on unregister. If this
4976 * happens, we simply run the queue
4977 * unscheduled, resulting in a noop
4978 * for this device.
4979 */
4980 linkwatch_run_queue();
4981 }
4982
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004983 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984
4985 rebroadcast_time = jiffies;
4986 }
4987
4988 msleep(250);
4989
4990 if (time_after(jiffies, warning_time + 10 * HZ)) {
4991 printk(KERN_EMERG "unregister_netdevice: "
4992 "waiting for %s to become free. Usage "
4993 "count = %d\n",
4994 dev->name, atomic_read(&dev->refcnt));
4995 warning_time = jiffies;
4996 }
4997 }
4998}
4999
5000/* The sequence is:
5001 *
5002 * rtnl_lock();
5003 * ...
5004 * register_netdevice(x1);
5005 * register_netdevice(x2);
5006 * ...
5007 * unregister_netdevice(y1);
5008 * unregister_netdevice(y2);
5009 * ...
5010 * rtnl_unlock();
5011 * free_netdev(y1);
5012 * free_netdev(y2);
5013 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005014 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005016 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017 * without deadlocking with linkwatch via keventd.
5018 * 2) Since we run with the RTNL semaphore not held, we can sleep
5019 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005020 *
5021 * We must not return until all unregister events added during
5022 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024void netdev_run_todo(void)
5025{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005026 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005029 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005030
5031 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005032
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033 while (!list_empty(&list)) {
5034 struct net_device *dev
5035 = list_entry(list.next, struct net_device, todo_list);
5036 list_del(&dev->todo_list);
5037
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005038 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 printk(KERN_ERR "network todo '%s' but state %d\n",
5040 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005041 dump_stack();
5042 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005044
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005045 dev->reg_state = NETREG_UNREGISTERED;
5046
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005047 on_each_cpu(flush_backlog, dev, 1);
5048
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005049 netdev_wait_allrefs(dev);
5050
5051 /* paranoia */
5052 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005053 WARN_ON(dev->ip_ptr);
5054 WARN_ON(dev->ip6_ptr);
5055 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005056
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005057 if (dev->destructor)
5058 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005059
5060 /* Free network device */
5061 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063}
5064
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005065/**
5066 * dev_get_stats - get network device statistics
5067 * @dev: device to get statistics from
5068 *
5069 * Get network statistics from device. The device driver may provide
5070 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5071 * the internal statistics structure is used.
5072 */
5073const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005074{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005075 const struct net_device_ops *ops = dev->netdev_ops;
5076
5077 if (ops->ndo_get_stats)
5078 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005079 else {
5080 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5081 struct net_device_stats *stats = &dev->stats;
5082 unsigned int i;
5083 struct netdev_queue *txq;
5084
5085 for (i = 0; i < dev->num_tx_queues; i++) {
5086 txq = netdev_get_tx_queue(dev, i);
5087 tx_bytes += txq->tx_bytes;
5088 tx_packets += txq->tx_packets;
5089 tx_dropped += txq->tx_dropped;
5090 }
5091 if (tx_bytes || tx_packets || tx_dropped) {
5092 stats->tx_bytes = tx_bytes;
5093 stats->tx_packets = tx_packets;
5094 stats->tx_dropped = tx_dropped;
5095 }
5096 return stats;
5097 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005098}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005099EXPORT_SYMBOL(dev_get_stats);
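
/*
 * Example: a minimal sketch of a consumer reading counters through this
 * accessor instead of poking dev->stats directly. The helper name is
 * hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static void example_log_tx(struct net_device *dev)
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	printk(KERN_DEBUG "%s: %lu packets, %lu bytes transmitted\n",
	       dev->name, stats->tx_packets, stats->tx_bytes);
}
#endif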
Rusty Russellc45d2862007-03-28 14:29:08 -07005100
David S. Millerdc2b4842008-07-08 17:18:23 -07005101static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005102 struct netdev_queue *queue,
5103 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005104{
David S. Millerdc2b4842008-07-08 17:18:23 -07005105 queue->dev = dev;
5106}
5107
David S. Millerbb949fb2008-07-08 16:55:56 -07005108static void netdev_init_queues(struct net_device *dev)
5109{
David S. Millere8a04642008-07-17 00:34:19 -07005110 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5111 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005112 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005113}
5114
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005116 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117 * @sizeof_priv: size of private data to allocate space for
5118 * @name: device name format string
5119 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005120 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 *
5122 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005123 * and performs basic initialization. Also allocates subquue structs
5124 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005126struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5127 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128{
David S. Millere8a04642008-07-17 00:34:19 -07005129 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005131 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005132 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005134 BUG_ON(strlen(name) >= sizeof(dev->name));
5135
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005136 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005137 if (sizeof_priv) {
5138 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005139 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005140 alloc_size += sizeof_priv;
5141 }
5142 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005143 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005145 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005147 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 return NULL;
5149 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150
Stephen Hemminger79439862008-07-21 13:28:44 -07005151 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005152 if (!tx) {
5153 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5154 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005155 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005156 }
5157
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005158 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005160
5161 if (dev_addr_init(dev))
5162 goto free_tx;
5163
Jiri Pirkoccffad22009-05-22 23:22:17 +00005164 dev_unicast_init(dev);
5165
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005166 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167
David S. Millere8a04642008-07-17 00:34:19 -07005168 dev->_tx = tx;
5169 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005170 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005171
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005172 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173
David S. Millerbb949fb2008-07-08 16:55:56 -07005174 netdev_init_queues(dev);
5175
Herbert Xud565b0a2008-12-15 23:38:52 -08005176 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005177 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 setup(dev);
5179 strcpy(dev->name, name);
5180 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005181
5182free_tx:
5183 kfree(tx);
5184
5185free_p:
5186 kfree(p);
5187 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005189EXPORT_SYMBOL(alloc_netdev_mq);
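
/*
 * Example: a minimal sketch of allocating a multiqueue Ethernet device,
 * one TX queue per possible CPU, with the standard ether_setup()
 * initializer. The helper name is hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static struct net_device *example_alloc_mq(void)
{
	return alloc_netdev_mq(0, "mq%d", ether_setup,
			       num_possible_cpus());
}
#endif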
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190
5191/**
5192 * free_netdev - free network device
5193 * @dev: device
5194 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005195 * This function does the last stage of destroying an allocated device
5196 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197 * If this is the last reference then it will be freed.
5198 */
5199void free_netdev(struct net_device *dev)
5200{
Herbert Xud565b0a2008-12-15 23:38:52 -08005201 struct napi_struct *p, *n;
5202
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005203 release_net(dev_net(dev));
5204
David S. Millere8a04642008-07-17 00:34:19 -07005205 kfree(dev->_tx);
5206
Jiri Pirkof001fde2009-05-05 02:48:28 +00005207 /* Flush device addresses */
5208 dev_addr_flush(dev);
5209
Herbert Xud565b0a2008-12-15 23:38:52 -08005210 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5211 netif_napi_del(p);
5212
Stephen Hemminger3041a062006-05-26 13:25:24 -07005213 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 if (dev->reg_state == NETREG_UNINITIALIZED) {
5215 kfree((char *)dev - dev->padded);
5216 return;
5217 }
5218
5219 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5220 dev->reg_state = NETREG_RELEASED;
5221
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005222 /* will free via device release */
5223 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005225EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005226
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005227/**
5228 * synchronize_net - Synchronize with packet receive processing
5229 *
5230 * Wait for packets currently being received to be done.
5231 * Does not block later packets from starting.
5232 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005233void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234{
5235 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005236 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005238EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239
5240/**
5241 * unregister_netdevice - remove device from the kernel
5242 * @dev: device
5243 *
5244 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005245 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 *
5247 * Callers must hold the rtnl semaphore. You may want
5248 * unregister_netdev() instead of this.
5249 */
5250
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08005251void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252{
Herbert Xua6620712007-12-12 19:21:56 -08005253 ASSERT_RTNL();
5254
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005255 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 /* Finish processing unregister after unlock */
5257 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005259EXPORT_SYMBOL(unregister_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260
5261/**
5262 * unregister_netdev - remove device from the kernel
5263 * @dev: device
5264 *
5265 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005266 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267 *
5268 * This is just a wrapper for unregister_netdevice that takes
5269 * the rtnl semaphore. In general you want to use this and not
5270 * unregister_netdevice.
5271 */
5272void unregister_netdev(struct net_device *dev)
5273{
5274 rtnl_lock();
5275 unregister_netdevice(dev);
5276 rtnl_unlock();
5277}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278EXPORT_SYMBOL(unregister_netdev);
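
/*
 * Example: a minimal sketch of the teardown that mirrors the probe pattern
 * shown earlier: unregister_netdev() waits, via netdev_run_todo(), until
 * all references are gone, after which free_netdev() may release the
 * memory. The helper name is hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}
#endif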
5279
Eric W. Biedermance286d32007-09-12 13:53:49 +02005280/**
5281 * dev_change_net_namespace - move device to a different network namespace
5282 * @dev: device
5283 * @net: network namespace
5284 * @pat: If not NULL, name pattern to try if the current device name
5285 * is already taken in the destination network namespace.
5286 *
5287 * This function shuts down a device interface and moves it
5288 * to a new network namespace. On success 0 is returned, on
5289 * a failure a negative errno code is returned.
5290 *
5291 * Callers must hold the rtnl semaphore.
5292 */
5293
5294int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5295{
5296 char buf[IFNAMSIZ];
5297 const char *destname;
5298 int err;
5299
5300 ASSERT_RTNL();
5301
5302 /* Don't allow namespace local devices to be moved. */
5303 err = -EINVAL;
5304 if (dev->features & NETIF_F_NETNS_LOCAL)
5305 goto out;
5306
Eric W. Biederman38918452008-10-27 17:51:47 -07005307#ifdef CONFIG_SYSFS
5308 /* Don't allow real devices to be moved when sysfs
5309 * is enabled.
5310 */
5311 err = -EINVAL;
5312 if (dev->dev.parent)
5313 goto out;
5314#endif
5315
Eric W. Biedermance286d32007-09-12 13:53:49 +02005316 /* Ensure the device has been registered */
5317 err = -EINVAL;
5318 if (dev->reg_state != NETREG_REGISTERED)
5319 goto out;
5320
5321 /* Get out if there is nothing to do */
5322 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005323 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005324 goto out;
5325
5326 /* Pick the destination device name, and ensure
5327 * we can use it in the destination network namespace.
5328 */
5329 err = -EEXIST;
5330 destname = dev->name;
5331 if (__dev_get_by_name(net, destname)) {
5332 /* We get here if we can't use the current device name */
5333 if (!pat)
5334 goto out;
5335 if (!dev_valid_name(pat))
5336 goto out;
5337 if (strchr(pat, '%')) {
5338 if (__dev_alloc_name(net, pat, buf) < 0)
5339 goto out;
5340 destname = buf;
5341 } else
5342 destname = pat;
5343 if (__dev_get_by_name(net, destname))
5344 goto out;
5345 }
5346
5347 /*
5348 * And now a mini version of register_netdevice() and unregister_netdevice().
5349 */
5350
5351 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005352 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005353
5354 /* And unlink it from device chain */
5355 err = -ENODEV;
5356 unlist_netdevice(dev);
5357
5358 synchronize_net();
5359
5360 /* Shutdown queueing discipline. */
5361 dev_shutdown(dev);
5362
5363 /* Notify protocols that we are about to destroy
5364 this device. They should clean all the things.
5365 */
5366 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5367
5368 /*
5369 * Flush the unicast and multicast chains
5370 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00005371 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005372 dev_addr_discard(dev);
5373
Eric W. Biederman38918452008-10-27 17:51:47 -07005374 netdev_unregister_kobject(dev);
5375
Eric W. Biedermance286d32007-09-12 13:53:49 +02005376 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005377 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005378
5379 /* Assign the new device name */
5380 if (destname != dev->name)
5381 strcpy(dev->name, destname);
5382
5383 /* If there is an ifindex conflict assign a new one */
5384 if (__dev_get_by_index(net, dev->ifindex)) {
5385 int iflink = (dev->iflink == dev->ifindex);
5386 dev->ifindex = dev_new_index(net);
5387 if (iflink)
5388 dev->iflink = dev->ifindex;
5389 }
5390
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005391 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005392 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005393 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005394
5395 /* Add the device back in the hashes */
5396 list_netdevice(dev);
5397
5398 /* Notify protocols that a new device appeared. */
5399 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5400
5401 synchronize_net();
5402 err = 0;
5403out:
5404 return err;
5405}
Johannes Berg463d0182009-07-14 00:33:35 +02005406EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005407
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408static int dev_cpu_callback(struct notifier_block *nfb,
5409 unsigned long action,
5410 void *ocpu)
5411{
5412 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005413 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 struct sk_buff *skb;
5415 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5416 struct softnet_data *sd, *oldsd;
5417
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005418 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419 return NOTIFY_OK;
5420
5421 local_irq_disable();
5422 cpu = smp_processor_id();
5423 sd = &per_cpu(softnet_data, cpu);
5424 oldsd = &per_cpu(softnet_data, oldcpu);
5425
5426 /* Find end of our completion_queue. */
5427 list_skb = &sd->completion_queue;
5428 while (*list_skb)
5429 list_skb = &(*list_skb)->next;
5430 /* Append completion queue from offline CPU. */
5431 *list_skb = oldsd->completion_queue;
5432 oldsd->completion_queue = NULL;
5433
5434 /* Find end of our output_queue. */
5435 list_net = &sd->output_queue;
5436 while (*list_net)
5437 list_net = &(*list_net)->next_sched;
5438 /* Append output queue from offline CPU. */
5439 *list_net = oldsd->output_queue;
5440 oldsd->output_queue = NULL;
5441
5442 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5443 local_irq_enable();
5444
5445 /* Process offline CPU's input_pkt_queue */
5446 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5447 netif_rx(skb);
5448
5449 return NOTIFY_OK;
5450}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005451
5452
Herbert Xu7f353bf2007-08-10 15:47:58 -07005453/**
Herbert Xub63365a2008-10-23 01:11:29 -07005454 * netdev_increment_features - increment feature set by one
5455 * @all: current feature set
5456 * @one: new feature set
5457 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005458 *
5459 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005460 * @one to the master device with current feature set @all. Will not
5461 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005462 */
Herbert Xub63365a2008-10-23 01:11:29 -07005463unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5464 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005465{
Herbert Xub63365a2008-10-23 01:11:29 -07005466 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005467 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005468 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5469 else if (mask & NETIF_F_ALL_CSUM) {
5470 /* If one device supports v4/v6 checksumming, set for all. */
5471 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5472 !(all & NETIF_F_GEN_CSUM)) {
5473 all &= ~NETIF_F_ALL_CSUM;
5474 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5475 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005476
Herbert Xub63365a2008-10-23 01:11:29 -07005477 /* If one device supports hw checksumming, set for all. */
5478 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5479 all &= ~NETIF_F_ALL_CSUM;
5480 all |= NETIF_F_HW_CSUM;
5481 }
5482 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005483
Herbert Xub63365a2008-10-23 01:11:29 -07005484 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005485
Herbert Xub63365a2008-10-23 01:11:29 -07005486 one |= all & NETIF_F_ONE_FOR_ALL;
5487 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5488 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005489
5490 return all;
5491}
Herbert Xub63365a2008-10-23 01:11:29 -07005492EXPORT_SYMBOL(netdev_increment_features);
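
/*
 * Example: a minimal sketch of how a master driver such as bonding might
 * fold a newly enslaved device's features into its own, then sanitize the
 * result. The helper name and the choice of NETIF_F_ONE_FOR_ALL as the
 * mask are hypothetical, not part of this file.
 */
#if 0	/* illustrative sketch only */
static void example_update_features(struct net_device *master,
				    struct net_device *slave)
{
	unsigned long features;

	features = netdev_increment_features(master->features,
					     slave->features,
					     NETIF_F_ONE_FOR_ALL);
	master->features = netdev_fix_features(features, master->name);
}
#endif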
Herbert Xu7f353bf2007-08-10 15:47:58 -07005493
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005494static struct hlist_head *netdev_create_hash(void)
5495{
5496 int i;
5497 struct hlist_head *hash;
5498
5499 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5500 if (hash != NULL)
5501 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5502 INIT_HLIST_HEAD(&hash[i]);
5503
5504 return hash;
5505}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver name for the device
 * @dev: network device
 * @buffer: buffer for the resulting name
 * @len: size of buffer
 *
 * Copy the name of the driver bound to @dev's parent device into
 * @buffer.  Returns @buffer, which is left empty if no driver can be
 * determined.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
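
/*
 * Usage sketch: the transmit watchdog in net/sched/sch_generic.c
 * reports the offending driver along these lines:
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */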

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
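
/*
 * Opt-out sketch: a device exempts itself from the migration above by
 * setting NETIF_F_NETNS_LOCAL in its setup routine, as the loopback
 * driver does among its other feature flags (drivers/net/loopback.c,
 * schematically):
 *
 *	dev->features |= NETIF_F_NETNS_LOCAL;
 */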

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, maintain this invariant by keeping the
	 * loopback device as the first device on the list of network
	 * devices: it is the first device that appears and the last
	 * network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
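
/*
 * Ordering sketch: subsys_initcall() runs net_dev_init() before
 * device-level initcalls, so by the time a NIC driver's module_init()
 * calls register_netdev(), the softnet queues and pernet state set up
 * above already exist.  A typical driver init (names hypothetical):
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return pci_register_driver(&mydrv_pci_driver);
 *	}
 *	module_init(mydrv_init);
 */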

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);

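/*
 * Consumption sketch: skb_tx_hashrnd seeds the transmit queue
 * selection in skb_tx_hash() earlier in this file, roughly:
 *
 *	hash = jhash_1word(hash, skb_tx_hashrnd);
 *	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
 *
 * Seeding once at late initcall time keeps the queue mapping stable
 * for the life of the system while varying across boots.
 */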