/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 2
#define BUILD 10
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
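/* i.e. with the values above, DRV_VERSION expands to "3.2.10-k" */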
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15     0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on the lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML. However, we never
	 * adjusted TIMINCA, so SYSTIMR will just read as all 0s; ignore it.
	 */
	if (hw->mac.type >= e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
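/*
 * For example, Q_IDX_82576() maps rss queue indices 0, 1, 2, 3 to
 * register offsets 0, 8, 1, 9, so the PF's rings avoid colliding with
 * the VF queue pairs described in igb_cache_ring_register() below.
 */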
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
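 *
 * Worked example (derived from the layouts described in
 * igb_assign_vector() below): on 82576, Rx queue 10 is written to row
 * (10 & 0x7) = 2 at column offset ((10 & 0x8) << 1) = 16, while on
 * 82580 the same queue lands in row (10 >> 1) = 5 at column offset
 * ((10 & 0x1) << 4) = 0.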
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector. */
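		/* e.g. the QUEUE0 bit is shifted left by the queue index,
		 * so rx_queue 2 contributes (E1000_EICR_RX_QUEUE0 << 2). */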
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
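	/*
	 * e.g. 4 RSS queues with queue pairing disabled gives
	 * 4 Rx + 4 Tx + 1 link = 9 MSI-X vectors requested below.
	 */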
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
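 * When there is a dedicated vector for every queue, rings map 1:1 to
 * vectors; otherwise an Rx ring and a Tx ring share each vector, e.g.
 * 4 Rx and 4 Tx queues on 4 queue vectors pairs rx0/tx0 on vector 0,
 * rx1/tx1 on vector 1, and so on.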
1184 **/
1185static int igb_map_ring_to_vector(struct igb_adapter *adapter)
1186{
1187 int i;
1188 int v_idx = 0;
1189
1190 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
1191 (adapter->num_q_vectors < adapter->num_tx_queues))
1192 return -ENOMEM;
1193
1194 if (adapter->num_q_vectors >=
1195 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1196 for (i = 0; i < adapter->num_rx_queues; i++)
1197 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1198 for (i = 0; i < adapter->num_tx_queues; i++)
1199 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1200 } else {
1201 for (i = 0; i < adapter->num_rx_queues; i++) {
1202 if (i < adapter->num_tx_queues)
1203 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1204 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1205 }
1206 for (; i < adapter->num_tx_queues; i++)
1207 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1208 }
1209 return 0;
1210}
1211
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: board private structure to initialize
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure to initialize
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n", err);

request_done:
	return err;
}

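/**
 * igb_free_irq - release the IRQ lines taken by igb_request_irq
 * @adapter: board private structure
 *
 * Under MSI-X this frees the "other causes" vector and then one vector
 * per q_vector; otherwise it frees the single MSI or legacy interrupt.
 **/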
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * We need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers, and clearing the bits can cause
	 * issues on the VF drivers, so we only clear the bits we set.
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
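	/* wait for any in-flight interrupt handlers to finish */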
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

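/**
 * igb_update_mng_vlan - update the manageability VLAN filter entry
 * @adapter: board private structure
 *
 * Keeps the VLAN filter table in sync with the VLAN id carried in the
 * manageability DHCP cookie, dropping the old id once it is unused.
 **/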
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

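/**
 * igb_down - Close the interface and bring the hardware down
 * @adapter: board private structure
 *
 * Quiesces receive and transmit, disables NAPI and interrupts, stops
 * the watchdog, records final statistics, and resets the hardware.
 **/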
void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware, DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

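/**
 * igb_reinit_locked - take the interface down and back up
 * @adapter: board private structure
 *
 * The __IGB_RESETTING bit serializes concurrent resets.
 **/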
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

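/**
 * igb_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer, reprograms the flow control
 * watermarks, quiesces any VFs, and then resets and re-initializes
 * the MAC.
 **/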
void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect, CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* the upper 16 bits hold the Tx packet buffer allocation in KB */
		tx_space = pba >> 16;
		/* the lower 16 bits hold the Rx packet buffer allocation in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about the Tx
		 * packet, but don't include the Ethernet FCS because hardware
		 * appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active VFs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	igb_init_dmac(adapter, pba);
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

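/**
 * igb_fix_features - keep the requested feature flags consistent
 * @netdev: network interface device structure
 * @features: feature set requested by user space
 **/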
static netdev_features_t igb_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate Rx/Tx VLAN accel
	 * enable/disable, make sure the Tx flag is always in the same
	 * state as the Rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int igb_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	return 0;
}

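/* netdev entry points exported to the network stack */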
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open = igb_open,
	.ndo_stop = igb_close,
	.ndo_start_xmit = igb_xmit_frame,
	.ndo_get_stats64 = igb_get_stats64,
	.ndo_set_rx_mode = igb_set_rx_mode,
	.ndo_set_mac_address = igb_set_mac,
	.ndo_change_mtu = igb_change_mtu,
	.ndo_do_ioctl = igb_ioctl,
	.ndo_tx_timeout = igb_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac = igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
	.ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = igb_netpoll,
#endif
	.ndo_fix_features = igb_fix_features,
	.ndo_set_features = igb_set_features,
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/*
	 * features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_IPV6_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		   "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
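	/* Energy Efficient Ethernet is only configurable on i350 parts */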
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable the watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (!igb_check_vf_assignment(adapter)) {
			pci_disable_sriov(pdev);
			msleep(500);
		} else {
			dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
		}

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;
	int old_vfs = igb_find_enabled_vfs(adapter);
	int i;

	if (old_vfs) {
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
			 "max_vfs setting of %d\n", old_vfs, max_vfs);
		adapter->vfs_allocated_count = old_vfs;
	}

	if (!adapter->vfs_allocated_count)
		return;

	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
				   sizeof(struct vf_data_storage), GFP_KERNEL);
	/* if allocation failed then we do not support SR-IOV */
	if (!adapter->vf_data) {
		adapter->vfs_allocated_count = 0;
		dev_err(&pdev->dev, "Unable to allocate memory for VF "
			"Data Storage\n");
		goto out;
	}

	if (!old_vfs) {
		if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
			goto err_out;
	}
	dev_info(&pdev->dev, "%d VFs allocated\n",
		 adapter->vfs_allocated_count);
	for (i = 0; i < adapter->vfs_allocated_count; i++)
		igb_vf_configure(adapter, i);

	/* DMA Coalescing is not supported in IOV mode. */
	adapter->flags &= ~IGB_FLAG_DMAC;
	goto out;
err_out:
	kfree(adapter->vf_data);
	adapter->vf_data = NULL;
	adapter->vfs_allocated_count = 0;
out:
	return;
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted. Instead we need to shift
		 * the registers to generate a 64bit timer value. As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits. As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->node = -1;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;

	/* Setup and initialize a copy of the hw vlan table array */
	adapter->shadow_vfta = kzalloc(sizeof(u32) *
				       E1000_VLAN_FILTER_TBL_SIZE,
				       GFP_ATOMIC);

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

2548/**
2549 * igb_close - Disables a network interface
2550 * @netdev: network interface device structure
2551 *
2552 * Returns 0, this is not allowed to fail
2553 *
2554 * The close entry point is called when an interface is de-activated
2555 * by the OS. The hardware is still under the driver's control, but
2556 * needs to be disabled. A global MAC reset is issued to stop the
2557 * hardware, and all transmit and receive resources are freed.
2558 **/
2559static int igb_close(struct net_device *netdev)
2560{
2561 struct igb_adapter *adapter = netdev_priv(netdev);
2562
2563 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2564 igb_down(adapter);
2565
2566 igb_free_irq(adapter);
2567
2568 igb_free_all_tx_resources(adapter);
2569 igb_free_all_rx_resources(adapter);
2570
Auke Kok9d5c8242008-01-24 02:22:38 -08002571 return 0;
2572}
2573
2574/**
2575 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002576 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2577 *
2578 * Return 0 on success, negative on failure
2579 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002580int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002581{
Alexander Duyck59d71982010-04-27 13:09:25 +00002582 struct device *dev = tx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002583 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002584 int size;
2585
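	/*
	 * Try the ring's own NUMA node first for both the buffer_info
	 * array and the descriptor ring below; if a node-local allocation
	 * fails, fall back to any node rather than failing setup outright.
	 */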
Alexander Duyck06034642011-08-26 07:44:22 +00002586 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002587 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2588 if (!tx_ring->tx_buffer_info)
2589 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002590 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002591 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002592
2593 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002594 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002595 tx_ring->size = ALIGN(tx_ring->size, 4096);
2596
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002597 set_dev_node(dev, tx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002598 tx_ring->desc = dma_alloc_coherent(dev,
2599 tx_ring->size,
2600 &tx_ring->dma,
2601 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002602 set_dev_node(dev, orig_node);
2603 if (!tx_ring->desc)
2604 tx_ring->desc = dma_alloc_coherent(dev,
2605 tx_ring->size,
2606 &tx_ring->dma,
2607 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002608
2609 if (!tx_ring->desc)
2610 goto err;
2611
Auke Kok9d5c8242008-01-24 02:22:38 -08002612 tx_ring->next_to_use = 0;
2613 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002614
Auke Kok9d5c8242008-01-24 02:22:38 -08002615 return 0;
2616
2617err:
Alexander Duyck06034642011-08-26 07:44:22 +00002618	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002619 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002620 "Unable to allocate memory for the transmit descriptor ring\n");
2621 return -ENOMEM;
2622}
2623
2624/**
2625 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2626 * (Descriptors) for all queues
2627 * @adapter: board private structure
2628 *
2629 * Return 0 on success, negative on failure
2630 **/
2631static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2632{
Alexander Duyck439705e2009-10-27 23:49:20 +00002633 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002634 int i, err = 0;
2635
2636 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002637 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002638 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002639 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002640 "Allocation for Tx Queue %u failed\n", i);
2641 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002642 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002643 break;
2644 }
2645 }
2646
2647 return err;
2648}
2649
2650/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002651 * igb_setup_tctl - configure the transmit control registers
2652 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002653 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002654void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002655{
Auke Kok9d5c8242008-01-24 02:22:38 -08002656 struct e1000_hw *hw = &adapter->hw;
2657 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002658
Alexander Duyck85b430b2009-10-27 15:50:29 +00002659 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2660 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002661
2662 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002663 tctl = rd32(E1000_TCTL);
2664 tctl &= ~E1000_TCTL_CT;
2665 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2666 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2667
2668 igb_config_collision_dist(hw);
2669
Auke Kok9d5c8242008-01-24 02:22:38 -08002670 /* Enable transmits */
2671 tctl |= E1000_TCTL_EN;
2672
2673 wr32(E1000_TCTL, tctl);
2674}
2675
2676/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002677 * igb_configure_tx_ring - Configure transmit ring after Reset
2678 * @adapter: board private structure
2679 * @ring: tx ring to configure
2680 *
2681 * Configure a transmit ring after a reset.
2682 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002683void igb_configure_tx_ring(struct igb_adapter *adapter,
2684 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002685{
2686 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002687 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002688 u64 tdba = ring->dma;
2689 int reg_idx = ring->reg_idx;
2690
2691 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002692 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002693 wrfl();
2694 mdelay(10);
2695
2696 wr32(E1000_TDLEN(reg_idx),
2697 ring->count * sizeof(union e1000_adv_tx_desc));
2698 wr32(E1000_TDBAL(reg_idx),
2699 tdba & 0x00000000ffffffffULL);
2700 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2701
Alexander Duyckfce99e32009-10-27 15:51:27 +00002702 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002703 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002704 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002705
2706 txdctl |= IGB_TX_PTHRESH;
2707 txdctl |= IGB_TX_HTHRESH << 8;
2708 txdctl |= IGB_TX_WTHRESH << 16;
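	/*
	 * TXDCTL packs the prefetch, host and write-back thresholds into
	 * bit fields at offsets 0, 8 and 16 respectively, which is what
	 * the three shifts above implement.
	 */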
2709
2710 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2711 wr32(E1000_TXDCTL(reg_idx), txdctl);
2712}
2713
2714/**
2715 * igb_configure_tx - Configure transmit Unit after Reset
2716 * @adapter: board private structure
2717 *
2718 * Configure the Tx unit of the MAC after a reset.
2719 **/
2720static void igb_configure_tx(struct igb_adapter *adapter)
2721{
2722 int i;
2723
2724 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002725 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002726}
2727
2728/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002729 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002730 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2731 *
2732 * Returns 0 on success, negative on failure
2733 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002734int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002735{
Alexander Duyck59d71982010-04-27 13:09:25 +00002736 struct device *dev = rx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002737 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002738 int size, desc_len;
2739
Alexander Duyck06034642011-08-26 07:44:22 +00002740 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002741 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2742 if (!rx_ring->rx_buffer_info)
2743 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002744 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002745 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002746
2747 desc_len = sizeof(union e1000_adv_rx_desc);
2748
2749 /* Round up to nearest 4K */
2750 rx_ring->size = rx_ring->count * desc_len;
2751 rx_ring->size = ALIGN(rx_ring->size, 4096);
2752
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002753 set_dev_node(dev, rx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002754 rx_ring->desc = dma_alloc_coherent(dev,
2755 rx_ring->size,
2756 &rx_ring->dma,
2757 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002758 set_dev_node(dev, orig_node);
2759 if (!rx_ring->desc)
2760 rx_ring->desc = dma_alloc_coherent(dev,
2761 rx_ring->size,
2762 &rx_ring->dma,
2763 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002764
2765 if (!rx_ring->desc)
2766 goto err;
2767
2768 rx_ring->next_to_clean = 0;
2769 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002770
Auke Kok9d5c8242008-01-24 02:22:38 -08002771 return 0;
2772
2773err:
Alexander Duyck06034642011-08-26 07:44:22 +00002774 vfree(rx_ring->rx_buffer_info);
2775 rx_ring->rx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002776	dev_err(dev,
 2777		"Unable to allocate memory for the receive descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002778 return -ENOMEM;
2779}
2780
2781/**
2782 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2783 * (Descriptors) for all queues
2784 * @adapter: board private structure
2785 *
2786 * Return 0 on success, negative on failure
2787 **/
2788static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2789{
Alexander Duyck439705e2009-10-27 23:49:20 +00002790 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002791 int i, err = 0;
2792
2793 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002794 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002795 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002796 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002797 "Allocation for Rx Queue %u failed\n", i);
2798 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002799 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002800 break;
2801 }
2802 }
2803
2804 return err;
2805}
2806
2807/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002808 * igb_setup_mrqc - configure the multiple receive queue control registers
2809 * @adapter: Board private structure
2810 **/
2811static void igb_setup_mrqc(struct igb_adapter *adapter)
2812{
2813 struct e1000_hw *hw = &adapter->hw;
2814 u32 mrqc, rxcsum;
2815 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2816 union e1000_reta {
2817 u32 dword;
2818 u8 bytes[4];
2819 } reta;
2820 static const u8 rsshash[40] = {
2821 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2822 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2823 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2824 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2825
2826 /* Fill out hash function seeds */
2827 for (j = 0; j < 10; j++) {
2828 u32 rsskey = rsshash[(j * 4)];
2829 rsskey |= rsshash[(j * 4) + 1] << 8;
2830 rsskey |= rsshash[(j * 4) + 2] << 16;
2831 rsskey |= rsshash[(j * 4) + 3] << 24;
2832 array_wr32(E1000_RSSRK(0), j, rsskey);
2833 }
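	/*
	 * Worked example of the packing above: the first iteration combines
	 * rsshash[0..3] = 6d 5a 56 da, least significant byte first, into
	 * the 32-bit key 0xda565a6d written to RSSRK(0).
	 */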
2834
Alexander Duycka99955f2009-11-12 18:37:19 +00002835 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002836
2837 if (adapter->vfs_allocated_count) {
 2838		/* 82575 and 82576 support 2 RSS queues for VMDq */
2839 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002840 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002841 case e1000_82580:
2842 num_rx_queues = 1;
2843 shift = 0;
2844 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002845 case e1000_82576:
2846 shift = 3;
2847 num_rx_queues = 2;
2848 break;
 2849		case e1000_82575:
 2850			shift = 2;
 2851			shift2 = 6;
			break;
 2852		default:
2853 break;
2854 }
2855 } else {
2856 if (hw->mac.type == e1000_82575)
2857 shift = 6;
2858 }
2859
2860 for (j = 0; j < (32 * 4); j++) {
2861 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2862 if (shift2)
2863 reta.bytes[j & 3] |= num_rx_queues << shift2;
2864 if ((j & 3) == 3)
2865 wr32(E1000_RETA(j >> 2), reta.dword);
2866 }
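	/*
	 * Worked example: on an 82576 with VFs enabled, num_rx_queues = 2
	 * and shift = 3, so the bytes written above alternate between
	 * (0 % 2) << 3 = 0x00 and (1 % 2) << 3 = 0x08, spreading RSS
	 * traffic evenly across the PF's two queues.
	 */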
2867
2868 /*
2869 * Disable raw packet checksumming so that RSS hash is placed in
2870 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2871 * offloads as they are enabled by default
2872 */
2873 rxcsum = rd32(E1000_RXCSUM);
2874 rxcsum |= E1000_RXCSUM_PCSD;
2875
2876 if (adapter->hw.mac.type >= e1000_82576)
2877 /* Enable Receive Checksum Offload for SCTP */
2878 rxcsum |= E1000_RXCSUM_CRCOFL;
2879
2880 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2881 wr32(E1000_RXCSUM, rxcsum);
2882
2883 /* If VMDq is enabled then we set the appropriate mode for that, else
2884 * we default to RSS so that an RSS hash is calculated per packet even
2885 * if we are only using one queue */
2886 if (adapter->vfs_allocated_count) {
2887 if (hw->mac.type > e1000_82575) {
2888 /* Set the default pool for the PF's first queue */
2889 u32 vtctl = rd32(E1000_VT_CTL);
2890 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2891 E1000_VT_CTL_DISABLE_DEF_POOL);
2892 vtctl |= adapter->vfs_allocated_count <<
2893 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2894 wr32(E1000_VT_CTL, vtctl);
2895 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002896 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002897 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2898 else
2899 mrqc = E1000_MRQC_ENABLE_VMDQ;
2900 } else {
2901 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2902 }
2903 igb_vmm_control(adapter);
2904
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002905 /*
2906 * Generate RSS hash based on TCP port numbers and/or
2907 * IPv4/v6 src and dst addresses since UDP cannot be
2908 * hashed reliably due to IP fragmentation
2909 */
2910 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2911 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2912 E1000_MRQC_RSS_FIELD_IPV6 |
2913 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2914 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002915
2916 wr32(E1000_MRQC, mrqc);
2917}
2918
2919/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002920 * igb_setup_rctl - configure the receive control registers
2921 * @adapter: Board private structure
2922 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002923void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002924{
2925 struct e1000_hw *hw = &adapter->hw;
2926 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002927
2928 rctl = rd32(E1000_RCTL);
2929
2930 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002931 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002932
Alexander Duyck69d728b2008-11-25 01:04:03 -08002933 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002934 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002935
Auke Kok87cb7e82008-07-08 15:08:29 -07002936 /*
2937 * enable stripping of CRC. It's unlikely this will break BMC
2938 * redirection as it did with e1000. Newer features require
2939 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002940 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002941 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002942
Alexander Duyck559e9c42009-10-27 23:52:50 +00002943 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002944 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002945
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002946 /* enable LPE to prevent packets larger than max_frame_size */
2947 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002948
Alexander Duyck952f72a2009-10-27 15:51:07 +00002949 /* disable queue 0 to prevent tail write w/o re-config */
2950 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002951
Alexander Duycke1739522009-02-19 20:39:44 -08002952 /* Attention!!! For SR-IOV PF driver operations you must enable
2953 * queue drop for all VF and PF queues to prevent head of line blocking
2954 * if an un-trusted VF does not provide descriptors to hardware.
2955 */
2956 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002957 /* set all queue drop enable bits */
2958 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002959 }
2960
Auke Kok9d5c8242008-01-24 02:22:38 -08002961 wr32(E1000_RCTL, rctl);
2962}
2963
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002964static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2965 int vfn)
2966{
2967 struct e1000_hw *hw = &adapter->hw;
2968 u32 vmolr;
2969
 2970	/* if it isn't the PF, check to see if VFs are enabled and
 2971	 * increase the size to support vlan tags */
2972 if (vfn < adapter->vfs_allocated_count &&
2973 adapter->vf_data[vfn].vlans_enabled)
2974 size += VLAN_TAG_SIZE;
2975
2976 vmolr = rd32(E1000_VMOLR(vfn));
2977 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2978 vmolr |= size | E1000_VMOLR_LPE;
2979 wr32(E1000_VMOLR(vfn), vmolr);
2980
2981 return 0;
2982}
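/*
 * Example: a pool whose max frame size is 1522 bytes and whose VF has
 * VLANs enabled is programmed above with RLPML = 1522 + VLAN_TAG_SIZE,
 * leaving room for the 4-byte tag on top of the largest frame.
 */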
2983
Auke Kok9d5c8242008-01-24 02:22:38 -08002984/**
Alexander Duycke1739522009-02-19 20:39:44 -08002985 * igb_rlpml_set - set maximum receive packet size
2986 * @adapter: board private structure
2987 *
2988 * Configure maximum receivable packet size.
2989 **/
2990static void igb_rlpml_set(struct igb_adapter *adapter)
2991{
Alexander Duyck153285f2011-08-26 07:43:32 +00002992 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08002993 struct e1000_hw *hw = &adapter->hw;
2994 u16 pf_id = adapter->vfs_allocated_count;
2995
Alexander Duycke1739522009-02-19 20:39:44 -08002996 if (pf_id) {
2997 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00002998 /*
2999 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3000 * to our max jumbo frame size, in case we need to enable
3001 * jumbo frames on one of the rings later.
3002 * This will not pass over-length frames into the default
3003 * queue because it's gated by the VMOLR.RLPML.
3004 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003005 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003006 }
3007
3008 wr32(E1000_RLPML, max_frame_size);
3009}
3010
Williams, Mitch A8151d292010-02-10 01:44:24 +00003011static inline void igb_set_vmolr(struct igb_adapter *adapter,
3012 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003013{
3014 struct e1000_hw *hw = &adapter->hw;
3015 u32 vmolr;
3016
3017 /*
 3018	 * This register exists only on 82576 and newer, so exit and do
 3019	 * nothing on older hardware
3020 */
3021 if (hw->mac.type < e1000_82576)
3022 return;
3023
3024 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003025 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3026 if (aupe)
3027 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3028 else
3029 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003030
3031 /* clear all bits that might not be set */
3032 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3033
Alexander Duycka99955f2009-11-12 18:37:19 +00003034 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003035 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3036 /*
3037 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3038 * multicast packets
3039 */
3040 if (vfn <= adapter->vfs_allocated_count)
3041 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3042
3043 wr32(E1000_VMOLR(vfn), vmolr);
3044}
3045
Alexander Duycke1739522009-02-19 20:39:44 -08003046/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003047 * igb_configure_rx_ring - Configure a receive ring after Reset
3048 * @adapter: board private structure
3049 * @ring: receive ring to be configured
3050 *
3051 * Configure the Rx unit of the MAC after a reset.
3052 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003053void igb_configure_rx_ring(struct igb_adapter *adapter,
3054 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003055{
3056 struct e1000_hw *hw = &adapter->hw;
3057 u64 rdba = ring->dma;
3058 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003059 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003060
3061 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003062 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003063
3064 /* Set DMA base address registers */
3065 wr32(E1000_RDBAL(reg_idx),
3066 rdba & 0x00000000ffffffffULL);
3067 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3068 wr32(E1000_RDLEN(reg_idx),
3069 ring->count * sizeof(union e1000_adv_rx_desc));
3070
3071 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003072 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003073 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003074 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003075
Alexander Duyck952f72a2009-10-27 15:51:07 +00003076 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003077 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003078#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003079 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003080#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003081 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003082#endif
Alexander Duyck44390ca2011-08-26 07:43:38 +00003083 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
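	/*
	 * With header split always enabled, packet headers land in a buffer
	 * of IGB_RX_HDR_LEN bytes while the payload goes to a half-page
	 * buffer, e.g. 2048 bytes on 4K-page systems per the BSIZEPKT
	 * value computed above.
	 */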
Alexander Duyck06218a82011-08-26 07:46:55 +00003084 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00003085 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003086 /* Only set Drop Enable if we are supporting multiple queues */
3087 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3088 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003089
3090 wr32(E1000_SRRCTL(reg_idx), srrctl);
3091
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003092 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003093 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003094
Alexander Duyck85b430b2009-10-27 15:50:29 +00003095 rxdctl |= IGB_RX_PTHRESH;
3096 rxdctl |= IGB_RX_HTHRESH << 8;
3097 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003098
3099 /* enable receive descriptor fetching */
3100 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003101 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3102}
3103
3104/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003105 * igb_configure_rx - Configure receive Unit after Reset
3106 * @adapter: board private structure
3107 *
3108 * Configure the Rx unit of the MAC after a reset.
3109 **/
3110static void igb_configure_rx(struct igb_adapter *adapter)
3111{
Hannes Eder91075842009-02-18 19:36:04 -08003112 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003113
Alexander Duyck68d480c2009-10-05 06:33:08 +00003114 /* set UTA to appropriate mode */
3115 igb_set_uta(adapter);
3116
Alexander Duyck26ad9172009-10-05 06:32:49 +00003117 /* set the correct pool for the PF default MAC address in entry 0 */
3118 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3119 adapter->vfs_allocated_count);
3120
Alexander Duyck06cf2662009-10-27 15:53:25 +00003121 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3122 * the Base and Length of the Rx Descriptor Ring */
3123 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003124 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003125}
3126
3127/**
3128 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003129 * @tx_ring: Tx descriptor ring for a specific queue
3130 *
3131 * Free all transmit software resources
3132 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003133void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003134{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003135 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003136
Alexander Duyck06034642011-08-26 07:44:22 +00003137 vfree(tx_ring->tx_buffer_info);
3138 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003139
Alexander Duyck439705e2009-10-27 23:49:20 +00003140 /* if not set, then don't free */
3141 if (!tx_ring->desc)
3142 return;
3143
Alexander Duyck59d71982010-04-27 13:09:25 +00003144 dma_free_coherent(tx_ring->dev, tx_ring->size,
3145 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003146
3147 tx_ring->desc = NULL;
3148}
3149
3150/**
3151 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3152 * @adapter: board private structure
3153 *
3154 * Free all transmit software resources
3155 **/
3156static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3157{
3158 int i;
3159
3160 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003161 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003162}
3163
Alexander Duyckebe42d12011-08-26 07:45:09 +00003164void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3165 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003166{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003167 if (tx_buffer->skb) {
3168 dev_kfree_skb_any(tx_buffer->skb);
3169 if (tx_buffer->dma)
3170 dma_unmap_single(ring->dev,
3171 tx_buffer->dma,
3172 tx_buffer->length,
3173 DMA_TO_DEVICE);
3174 } else if (tx_buffer->dma) {
3175 dma_unmap_page(ring->dev,
3176 tx_buffer->dma,
3177 tx_buffer->length,
3178 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003179 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003180 tx_buffer->next_to_watch = NULL;
3181 tx_buffer->skb = NULL;
3182 tx_buffer->dma = 0;
3183 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003184}
3185
3186/**
3187 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003188 * @tx_ring: ring to be cleaned
3189 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003190static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003191{
Alexander Duyck06034642011-08-26 07:44:22 +00003192 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003193 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003194 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003195
Alexander Duyck06034642011-08-26 07:44:22 +00003196 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003197 return;
3198 /* Free all the Tx ring sk_buffs */
3199
3200 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003201 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003202 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003203 }
Eric Dumazetbdbc0632012-01-04 20:23:36 +00003204 netdev_tx_reset_queue(txring_txq(tx_ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08003205
Alexander Duyck06034642011-08-26 07:44:22 +00003206 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3207 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003208
3209 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003210 memset(tx_ring->desc, 0, tx_ring->size);
3211
3212 tx_ring->next_to_use = 0;
3213 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003214}
3215
3216/**
3217 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3218 * @adapter: board private structure
3219 **/
3220static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3221{
3222 int i;
3223
3224 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003225 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003226}
3227
3228/**
3229 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003230 * @rx_ring: ring to clean the resources from
3231 *
3232 * Free all receive software resources
3233 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003234void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003235{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003236 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003237
Alexander Duyck06034642011-08-26 07:44:22 +00003238 vfree(rx_ring->rx_buffer_info);
3239 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003240
Alexander Duyck439705e2009-10-27 23:49:20 +00003241 /* if not set, then don't free */
3242 if (!rx_ring->desc)
3243 return;
3244
Alexander Duyck59d71982010-04-27 13:09:25 +00003245 dma_free_coherent(rx_ring->dev, rx_ring->size,
3246 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003247
3248 rx_ring->desc = NULL;
3249}
3250
3251/**
3252 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3253 * @adapter: board private structure
3254 *
3255 * Free all receive software resources
3256 **/
3257static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3258{
3259 int i;
3260
3261 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003262 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003263}
3264
3265/**
3266 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003267 * @rx_ring: ring to free buffers from
3268 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003269static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003270{
Auke Kok9d5c8242008-01-24 02:22:38 -08003271 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003272 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003273
Alexander Duyck06034642011-08-26 07:44:22 +00003274 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003275 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003276
Auke Kok9d5c8242008-01-24 02:22:38 -08003277 /* Free all the Rx ring sk_buffs */
3278 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003279 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003280 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003281 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003282 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003283 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003284 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003285 buffer_info->dma = 0;
3286 }
3287
3288 if (buffer_info->skb) {
3289 dev_kfree_skb(buffer_info->skb);
3290 buffer_info->skb = NULL;
3291 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003292 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003293 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003294 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003295 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003296 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003297 buffer_info->page_dma = 0;
3298 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003299 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003300 put_page(buffer_info->page);
3301 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003302 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003303 }
3304 }
3305
Alexander Duyck06034642011-08-26 07:44:22 +00003306 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3307 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003308
3309 /* Zero out the descriptor ring */
3310 memset(rx_ring->desc, 0, rx_ring->size);
3311
3312 rx_ring->next_to_clean = 0;
3313 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003314}
3315
3316/**
3317 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3318 * @adapter: board private structure
3319 **/
3320static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3321{
3322 int i;
3323
3324 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003325 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003326}
3327
3328/**
3329 * igb_set_mac - Change the Ethernet Address of the NIC
3330 * @netdev: network interface device structure
3331 * @p: pointer to an address structure
3332 *
3333 * Returns 0 on success, negative on failure
3334 **/
3335static int igb_set_mac(struct net_device *netdev, void *p)
3336{
3337 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003338 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003339 struct sockaddr *addr = p;
3340
3341 if (!is_valid_ether_addr(addr->sa_data))
3342 return -EADDRNOTAVAIL;
3343
3344 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003345 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003346
Alexander Duyck26ad9172009-10-05 06:32:49 +00003347 /* set the correct pool for the new PF MAC address in entry 0 */
3348 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3349 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003350
Auke Kok9d5c8242008-01-24 02:22:38 -08003351 return 0;
3352}
3353
3354/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003355 * igb_write_mc_addr_list - write multicast addresses to MTA
3356 * @netdev: network interface device structure
3357 *
3358 * Writes multicast address list to the MTA hash table.
3359 * Returns: -ENOMEM on failure
3360 * 0 on no addresses written
3361 * X on writing X addresses to MTA
3362 **/
3363static int igb_write_mc_addr_list(struct net_device *netdev)
3364{
3365 struct igb_adapter *adapter = netdev_priv(netdev);
3366 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003367 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003368 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003369 int i;
3370
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003371 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003372 /* nothing to program, so clear mc list */
3373 igb_update_mc_addr_list(hw, NULL, 0);
3374 igb_restore_vf_multicasts(adapter);
3375 return 0;
3376 }
3377
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003378	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003379 if (!mta_list)
3380 return -ENOMEM;
3381
Alexander Duyck68d480c2009-10-05 06:33:08 +00003382 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003383 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003384 netdev_for_each_mc_addr(ha, netdev)
3385 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003386
Alexander Duyck68d480c2009-10-05 06:33:08 +00003387 igb_update_mc_addr_list(hw, mta_list, i);
3388 kfree(mta_list);
3389
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003390 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003391}
3392
3393/**
3394 * igb_write_uc_addr_list - write unicast addresses to RAR table
3395 * @netdev: network interface device structure
3396 *
3397 * Writes unicast address list to the RAR table.
3398 * Returns: -ENOMEM on failure/insufficient address space
3399 * 0 on no addresses written
3400 * X on writing X addresses to the RAR table
3401 **/
3402static int igb_write_uc_addr_list(struct net_device *netdev)
3403{
3404 struct igb_adapter *adapter = netdev_priv(netdev);
3405 struct e1000_hw *hw = &adapter->hw;
3406 unsigned int vfn = adapter->vfs_allocated_count;
3407 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
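	/* entry 0 holds the PF default MAC and the top vfn entries hold
	 * the VF MAC addresses, hence the (vfn + 1) entries reserved above */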
3408 int count = 0;
3409
3410 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003411 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003412 return -ENOMEM;
3413
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003414 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003415 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003416
3417 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003418 if (!rar_entries)
3419 break;
3420 igb_rar_set_qsel(adapter, ha->addr,
3421 rar_entries--,
3422 vfn);
3423 count++;
3424 }
3425 }
3426 /* write the addresses in reverse order to avoid write combining */
3427 for (; rar_entries > 0 ; rar_entries--) {
3428 wr32(E1000_RAH(rar_entries), 0);
3429 wr32(E1000_RAL(rar_entries), 0);
3430 }
3431 wrfl();
3432
3433 return count;
3434}
3435
3436/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003437 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003438 * @netdev: network interface device structure
3439 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003440 * The set_rx_mode entry point is called whenever the unicast or multicast
3441 * address lists or the network interface flags are updated. This routine is
3442 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003443 * promiscuous mode, and all-multi behavior.
3444 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003445static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003446{
3447 struct igb_adapter *adapter = netdev_priv(netdev);
3448 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003449 unsigned int vfn = adapter->vfs_allocated_count;
3450 u32 rctl, vmolr = 0;
3451 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003452
3453 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003454 rctl = rd32(E1000_RCTL);
3455
Alexander Duyck68d480c2009-10-05 06:33:08 +00003456	/* clear the affected bits */
3457 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3458
Patrick McHardy746b9f02008-07-16 20:15:45 -07003459 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003460 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003461 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003462 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003463 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003464 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003465 vmolr |= E1000_VMOLR_MPME;
3466 } else {
3467 /*
 3468			 * Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003469 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003470 * that we can at least receive multicast traffic
3471 */
3472 count = igb_write_mc_addr_list(netdev);
3473 if (count < 0) {
3474 rctl |= E1000_RCTL_MPE;
3475 vmolr |= E1000_VMOLR_MPME;
3476 } else if (count) {
3477 vmolr |= E1000_VMOLR_ROMPE;
3478 }
3479 }
3480 /*
 3481		 * Write addresses to available RAR registers; if there is not
3482 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003483 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003484 */
3485 count = igb_write_uc_addr_list(netdev);
3486 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003487 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003488 vmolr |= E1000_VMOLR_ROPE;
3489 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003490 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003491 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003492 wr32(E1000_RCTL, rctl);
3493
Alexander Duyck68d480c2009-10-05 06:33:08 +00003494 /*
3495 * In order to support SR-IOV and eventually VMDq it is necessary to set
3496 * the VMOLR to enable the appropriate modes. Without this workaround
3497 * we will have issues with VLAN tag stripping not being done for frames
3498 * that are only arriving because we are the default pool
3499 */
3500 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003501 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003502
Alexander Duyck68d480c2009-10-05 06:33:08 +00003503 vmolr |= rd32(E1000_VMOLR(vfn)) &
3504 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3505 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003506 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003507}
3508
Greg Rose13800462010-11-06 02:08:26 +00003509static void igb_check_wvbr(struct igb_adapter *adapter)
3510{
3511 struct e1000_hw *hw = &adapter->hw;
3512 u32 wvbr = 0;
3513
3514 switch (hw->mac.type) {
3515 case e1000_82576:
3516 case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
3519 break;
3520 default:
3521 break;
3522 }
3523
3524 adapter->wvbr |= wvbr;
3525}
3526
3527#define IGB_STAGGERED_QUEUE_OFFSET 8
3528
3529static void igb_spoof_check(struct igb_adapter *adapter)
3530{
3531 int j;
3532
3533 if (!adapter->wvbr)
3534 return;
3535
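	/*
	 * WVBR reports one spoof-event bit per queue, and a VF's two queues
	 * sit IGB_STAGGERED_QUEUE_OFFSET (8) bit positions apart, so both
	 * bits are tested for each VF below.
	 */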
 3536	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3537 if (adapter->wvbr & (1 << j) ||
3538 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3539 dev_warn(&adapter->pdev->dev,
3540 "Spoof event(s) detected on VF %d\n", j);
3541 adapter->wvbr &=
3542 ~((1 << j) |
3543 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3544 }
3545 }
3546}
3547
Auke Kok9d5c8242008-01-24 02:22:38 -08003548/* Need to wait a few seconds after link up to get diagnostic information from
3549 * the phy */
3550static void igb_update_phy_info(unsigned long data)
3551{
3552 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003553 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003554}
3555
3556/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003557 * igb_has_link - check shared code for link and determine up/down
3558 * @adapter: pointer to driver private info
3559 **/
Nick Nunley31455352010-02-17 01:01:21 +00003560bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003561{
3562 struct e1000_hw *hw = &adapter->hw;
3563 bool link_active = false;
3564 s32 ret_val = 0;
3565
3566 /* get_link_status is set on LSC (link status) interrupt or
3567 * rx sequence error interrupt. get_link_status will stay
3568 * false until the e1000_check_for_link establishes link
3569 * for copper adapters ONLY
3570 */
3571 switch (hw->phy.media_type) {
3572 case e1000_media_type_copper:
3573 if (hw->mac.get_link_status) {
3574 ret_val = hw->mac.ops.check_for_link(hw);
3575 link_active = !hw->mac.get_link_status;
3576 } else {
3577 link_active = true;
3578 }
3579 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003580 case e1000_media_type_internal_serdes:
3581 ret_val = hw->mac.ops.check_for_link(hw);
3582 link_active = hw->mac.serdes_has_link;
3583 break;
3584 default:
3585 case e1000_media_type_unknown:
3586 break;
3587 }
3588
3589 return link_active;
3590}
3591
Stefan Assmann563988d2011-04-05 04:27:15 +00003592static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3593{
3594 bool ret = false;
3595 u32 ctrl_ext, thstat;
3596
3597 /* check for thermal sensor event on i350, copper only */
3598 if (hw->mac.type == e1000_i350) {
3599 thstat = rd32(E1000_THSTAT);
3600 ctrl_ext = rd32(E1000_CTRL_EXT);
3601
3602 if ((hw->phy.media_type == e1000_media_type_copper) &&
3603 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3604 ret = !!(thstat & event);
3605 }
3606 }
3607
3608 return ret;
3609}
3610
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003611/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003612 * igb_watchdog - Timer Call-back
3613 * @data: pointer to adapter cast into an unsigned long
3614 **/
3615static void igb_watchdog(unsigned long data)
3616{
3617 struct igb_adapter *adapter = (struct igb_adapter *)data;
3618 /* Do the rest outside of interrupt context */
3619 schedule_work(&adapter->watchdog_task);
3620}
3621
3622static void igb_watchdog_task(struct work_struct *work)
3623{
3624 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003625 struct igb_adapter,
3626 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003627 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003628 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003629 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003630 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003631
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003632 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003633 if (link) {
3634 if (!netif_carrier_ok(netdev)) {
3635 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003636 hw->mac.ops.get_speed_and_duplex(hw,
3637 &adapter->link_speed,
3638 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003639
3640 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003641 /* Links status message must follow this format */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003642 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3643 "Duplex, Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003644 netdev->name,
3645 adapter->link_speed,
3646 adapter->link_duplex == FULL_DUPLEX ?
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003647 "Full" : "Half",
3648 (ctrl & E1000_CTRL_TFCE) &&
3649 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3650 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3651 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
Auke Kok9d5c8242008-01-24 02:22:38 -08003652
Stefan Assmann563988d2011-04-05 04:27:15 +00003653 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003654 if (igb_thermal_sensor_event(hw,
3655 E1000_THSTAT_LINK_THROTTLE)) {
3656 netdev_info(netdev, "The network adapter link "
3657 "speed was downshifted because it "
3658 "overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003659 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003660
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003661 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003662 adapter->tx_timeout_factor = 1;
3663 switch (adapter->link_speed) {
3664 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003665 adapter->tx_timeout_factor = 14;
3666 break;
3667 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003668 /* maybe add some timeout factor ? */
3669 break;
3670 }
3671
3672 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003673
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003674 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003675 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003676
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003677 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003678 if (!test_bit(__IGB_DOWN, &adapter->state))
3679 mod_timer(&adapter->phy_info_timer,
3680 round_jiffies(jiffies + 2 * HZ));
3681 }
3682 } else {
3683 if (netif_carrier_ok(netdev)) {
3684 adapter->link_speed = 0;
3685 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003686
3687 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003688 if (igb_thermal_sensor_event(hw,
3689 E1000_THSTAT_PWR_DOWN)) {
3690 netdev_err(netdev, "The network adapter was "
3691 "stopped because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003692 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003693
Alexander Duyck527d47c2008-11-27 00:21:39 -08003694 /* Links status message must follow this format */
3695 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3696 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003697 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003698
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003699 igb_ping_all_vfs(adapter);
3700
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003701 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003702 if (!test_bit(__IGB_DOWN, &adapter->state))
3703 mod_timer(&adapter->phy_info_timer,
3704 round_jiffies(jiffies + 2 * HZ));
3705 }
3706 }
3707
Eric Dumazet12dcd862010-10-15 17:27:10 +00003708 spin_lock(&adapter->stats64_lock);
3709 igb_update_stats(adapter, &adapter->stats64);
3710 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003711
Alexander Duyckdbabb062009-11-12 18:38:16 +00003712 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003713 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003714 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003715 /* We've lost link, so the controller stops DMA,
3716 * but we've got queued Tx work that's never going
3717 * to get done, so reset controller to flush Tx.
3718 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003719 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3720 adapter->tx_timeout_count++;
3721 schedule_work(&adapter->reset_task);
3722 /* return immediately since reset is imminent */
3723 return;
3724 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003725 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003726
Alexander Duyckdbabb062009-11-12 18:38:16 +00003727 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00003728 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00003729 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003730
Auke Kok9d5c8242008-01-24 02:22:38 -08003731 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003732 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003733 u32 eics = 0;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00003734 for (i = 0; i < adapter->num_q_vectors; i++)
3735 eics |= adapter->q_vector[i]->eims_value;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003736 wr32(E1000_EICS, eics);
3737 } else {
3738 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3739 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003740
Greg Rose13800462010-11-06 02:08:26 +00003741 igb_spoof_check(adapter);
3742
Auke Kok9d5c8242008-01-24 02:22:38 -08003743 /* Reset the timer */
3744 if (!test_bit(__IGB_DOWN, &adapter->state))
3745 mod_timer(&adapter->watchdog_timer,
3746 round_jiffies(jiffies + 2 * HZ));
3747}
3748
3749enum latency_range {
3750 lowest_latency = 0,
3751 low_latency = 1,
3752 bulk_latency = 2,
3753 latency_invalid = 255
3754};
3755
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003756/**
3757 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3758 *
 3759 * Stores a new ITR value based strictly on packet size. This
3760 * algorithm is less sophisticated than that used in igb_update_itr,
3761 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003762 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003763 * were determined based on theoretical maximum wire speed and testing
3764 * data, in order to minimize response time while increasing bulk
3765 * throughput.
3766 * This functionality is controlled by the InterruptThrottleRate module
3767 * parameter (see igb_param.c)
3768 * NOTE: This function is called only when operating in a multiqueue
3769 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003770 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003771 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003772static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003773{
Alexander Duyck047e0032009-10-27 15:49:27 +00003774 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003775 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003776 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003777 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003778
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003779 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3780 * ints/sec - ITR timer value of 120 ticks.
3781 */
3782 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003783 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003784 goto set_itr_val;
3785 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003786
Alexander Duyck0ba82992011-08-26 07:45:47 +00003787 packets = q_vector->rx.total_packets;
3788 if (packets)
3789 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003790
Alexander Duyck0ba82992011-08-26 07:45:47 +00003791 packets = q_vector->tx.total_packets;
3792 if (packets)
3793 avg_wire_size = max_t(u32, avg_wire_size,
3794 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003795
3796 /* if avg_wire_size isn't set no work was done */
3797 if (!avg_wire_size)
3798 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003799
3800 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3801 avg_wire_size += 24;
3802
3803 /* Don't starve jumbo frames */
3804 avg_wire_size = min(avg_wire_size, 3000);
3805
3806 /* Give a little boost to mid-size frames */
3807 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3808 new_val = avg_wire_size / 3;
3809 else
3810 new_val = avg_wire_size / 2;
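	/*
	 * Example: full-sized 1500-byte frames give avg_wire_size =
	 * min(1524, 3000) = 1524, which misses the mid-size boost and
	 * yields new_val = 762; 100-byte frames give (100 + 24) / 2 = 62,
	 * i.e. a much shorter interval between interrupts.
	 */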

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE:  These calculations are only valid when operating in a single-
 *        queue environment.
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

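	/* Step through the three latency states: lowest_latency favors
	 * response time, bulk_latency favors throughput, and low_latency
	 * sits in between.  Transitions depend on packet counts, byte
	 * totals, and the average bytes per packet seen since the last
	 * interrupt.
	 */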
	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				itrval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				itrval = bulk_latency;
			} else if ((packets > 35)) {
				itrval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) :
			  new_itr;
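		/* Note: with ITR in register units (larger value means
		 * fewer ints/sec), (new * old) / (new + old / 4) is always
		 * below both inputs, so when new_itr > itr_val the max()
		 * above appears to resolve to new_itr with no intermediate
		 * step.
		 */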
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

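/* igb_tx_ctxtdesc - write an advanced Tx context descriptor
 *
 * The context descriptor carries the offload metadata (VLAN tag, MAC/IP
 * header lengths, L4 type, and MSS) that subsequent data descriptors in
 * the same flow refer to for checksum insertion and TSO.
 */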
void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
		     u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
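		/* The checksum field is seeded with the pseudo-header sum
		 * (addresses and protocol, zero length); the hardware folds
		 * in the payload as it segments, producing the final TCP
		 * checksum for each resulting frame.
		 */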
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
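		/* UDP needs no L4T bits; the descriptor's L4T field of
		 * zero already selects UDP, so only the header length
		 * below matters.
		 */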
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}

static __le32 igb_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
				      E1000_ADVTXD_DCMD_IFCS |
				      E1000_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);

	/* set timestamp bit if present */
	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);

	/* set segmentation bits for TSO */
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring if any offload is enabled */
	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert IPv4 checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	}

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

/*
 * The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR	15
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
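/* i.e. each data descriptor maps at most 32768 bytes (1 << 15); larger
 * buffers are split across multiple descriptors in igb_tx_map() below.
 */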

static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer_info;
	union e1000_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	__le32 cmd_type;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	first->length = size;
	first->dma = dma;
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
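		/* A single buffer can exceed what one descriptor may carry;
		 * peel off IGB_MAX_DATA_PER_TXD-sized chunks until the
		 * remainder fits, then move on to the next fragment.
		 */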
		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.olinfo_status = 0;
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = size;
		tx_buffer_info->dma = dma;

		tx_desc->read.olinfo_status = 0;
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		frag++;
	}

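	/* report the bytes queued to Byte Queue Limits accounting */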
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time; it synchronizes IO on IA64/Altix systems */
	mmiowb();

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	igb_tx_map(tx_ring, first, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;

out_drop:
	igb_unmap_and_free_tx_resource(tx_ring, first);

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

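	/* only pay for the modulo when the stack hands us an out-of-range
	 * queue index; in the common case the mapping is already valid */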
	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
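		/* skb_padto() zeroes the added bytes but does not touch
		 * skb->len, hence the explicit fixup below */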
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}
4454
4455/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004456 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004457 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004458 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004459 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004460 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004461static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4462 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004463{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004464 struct igb_adapter *adapter = netdev_priv(netdev);
4465
4466 spin_lock(&adapter->stats64_lock);
4467 igb_update_stats(adapter, &adapter->stats64);
4468 memcpy(stats, &adapter->stats64, sizeof(*stats));
4469 spin_unlock(&adapter->stats64_lock);
4470
4471 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004472}
4473
4474/**
4475 * igb_change_mtu - Change the Maximum Transfer Unit
4476 * @netdev: network interface device structure
4477 * @new_mtu: new value for maximum frame size
4478 *
4479 * Returns 0 on success, negative on failure
4480 **/
4481static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4482{
4483 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004484 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004485 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004486
Alexander Duyckc809d222009-10-27 23:52:13 +00004487 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004488 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004489 return -EINVAL;
4490 }
4491
Alexander Duyck153285f2011-08-26 07:43:32 +00004492#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004493 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004494 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004495 return -EINVAL;
4496 }
4497
4498 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4499 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004500
Auke Kok9d5c8242008-01-24 02:22:38 -08004501 /* igb_down has a dependency on max_frame_size */
4502 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004503
Alexander Duyck4c844852009-10-27 15:52:07 +00004504 if (netif_running(netdev))
4505 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004506
Alexander Duyck090b1792009-10-27 23:51:55 +00004507 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004508 netdev->mtu, new_mtu);
4509 netdev->mtu = new_mtu;
4510
4511 if (netif_running(netdev))
4512 igb_up(adapter);
4513 else
4514 igb_reset(adapter);
4515
4516 clear_bit(__IGB_RESETTING, &adapter->state);
4517
4518 return 0;
4519}
4520
4521/**
4522 * igb_update_stats - Update the board statistics counters
4523 * @adapter: board private structure
4524 **/
4525
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

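		/* snapshot under the u64_stats seqcount; retry if a writer
		 * updated the counters while we were reading */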
		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

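	/* 82575 wants the ITR interval replicated in both halves of EITR;
	 * newer parts instead take E1000_EITR_CNT_IGNR, which asks the
	 * hardware not to restart its moderation countdown on this write.
	 */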
	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pvfdev;
	unsigned int device_id;
	u16 thisvf_devfn;

	random_ether_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

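	/* Derive the VF's routing ID from the PF's: SR-IOV VFs for these
	 * parts start at devfn + 0x80 and step by the per-device stride,
	 * e.g. with the PF at devfn 0, 82576 VF0 sits at 0x80, VF1 at 0x82.
	 */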
	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
			(pdev->devfn & 1);
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
			(pdev->devfn & 3);
		break;
	default:
		device_id = 0;
		thisvf_devfn = 0;
		break;
	}

	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == thisvf_devfn)
			break;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	if (pvfdev)
		adapter->vf_data[vf].vfdev = pvfdev;
	else
		dev_err(&pdev->dev,
			"Couldn't find pci dev ptr for VF %4.4x\n",
			thisvf_devfn);
	return pvfdev != NULL;
}

static int igb_find_enabled_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pvfdev;
	u16 vf_devfn = 0;
	u16 vf_stride;
	unsigned int device_id;
	int vfs_found = 0;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		vf_stride = 2;
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		vf_stride = 4;
		break;
	default:
		device_id = 0;
		vf_stride = 0;
		break;
	}

	vf_devfn = pdev->devfn + 0x80;
	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == vf_devfn)
			vfs_found++;
		vf_devfn += vf_stride;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	return vfs_found;
}

static int igb_check_vf_assignment(struct igb_adapter *adapter)
{
	int i;
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (adapter->vf_data[i].vfdev) {
			if (adapter->vf_data[i].vfdev->dev_flags &
			    PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}
	}
	return false;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
 5116			vid = reg & E1000_VLVF_VLANID_MASK; /* save vid before zeroing reg */
 5117			reg = 0;
5118 igb_vfta_set(hw, vid, false);
5119 }
5120
5121 wr32(E1000_VLVF(i), reg);
5122 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005123
5124 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005125}
5126
5127static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5128{
5129 struct e1000_hw *hw = &adapter->hw;
5130 u32 reg, i;
5131
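 /* VFTA is the global VLAN filter bitmap; the VLVF array maps each
  * enabled VLAN id to the set of pools (PF and VFs) allowed to
  * receive it, so the two tables must be kept in sync below */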
Alexander Duyck51466232009-10-27 23:47:35 +00005132 /* The vlvf table only exists on 82576 hardware and newer */
5133 if (hw->mac.type < e1000_82576)
5134 return -1;
5135
5136 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005137 if (!adapter->vfs_allocated_count)
5138 return -1;
5139
5140 /* Find the vlan filter for this id */
5141 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5142 reg = rd32(E1000_VLVF(i));
5143 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5144 vid == (reg & E1000_VLVF_VLANID_MASK))
5145 break;
5146 }
5147
5148 if (add) {
5149 if (i == E1000_VLVF_ARRAY_SIZE) {
5150 /* Did not find a matching VLAN ID entry that was
5151 * enabled. Search for a free filter entry, i.e.
5152 * one without the enable bit set
5153 */
5154 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5155 reg = rd32(E1000_VLVF(i));
5156 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5157 break;
5158 }
5159 }
5160 if (i < E1000_VLVF_ARRAY_SIZE) {
5161 /* Found an enabled/available entry */
5162 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5163
5164 /* if !enabled we need to set this up in vfta */
5165 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005166 /* add VID to filter table */
5167 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005168 reg |= E1000_VLVF_VLANID_ENABLE;
5169 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005170 reg &= ~E1000_VLVF_VLANID_MASK;
5171 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005172 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005173
5174 /* do not modify RLPML for PF devices */
5175 if (vf >= adapter->vfs_allocated_count)
5176 return 0;
5177
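 /* the first VLAN enabled on a VF grows that VF's maximum accepted
  * packet length (RLPML) by the 4-byte 802.1Q tag; the mirror-image
  * code below shrinks it again when the last VLAN is removed */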
5178 if (!adapter->vf_data[vf].vlans_enabled) {
5179 u32 size;
5180 reg = rd32(E1000_VMOLR(vf));
5181 size = reg & E1000_VMOLR_RLPML_MASK;
5182 size += 4;
5183 reg &= ~E1000_VMOLR_RLPML_MASK;
5184 reg |= size;
5185 wr32(E1000_VMOLR(vf), reg);
5186 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005187
Alexander Duyck51466232009-10-27 23:47:35 +00005188 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005189 }
5190 } else {
5191 if (i < E1000_VLVF_ARRAY_SIZE) {
5192 /* remove vf from the pool */
5193 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5194 /* if pool is empty then remove entry from vfta */
5195 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5196 reg = 0;
5197 igb_vfta_set(hw, vid, false);
5198 }
5199 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005200
5201 /* do not modify RLPML for PF devices */
5202 if (vf >= adapter->vfs_allocated_count)
5203 return 0;
5204
5205 adapter->vf_data[vf].vlans_enabled--;
5206 if (!adapter->vf_data[vf].vlans_enabled) {
5207 u32 size;
5208 reg = rd32(E1000_VMOLR(vf));
5209 size = reg & E1000_VMOLR_RLPML_MASK;
5210 size -= 4;
5211 reg &= ~E1000_VMOLR_RLPML_MASK;
5212 reg |= size;
5213 wr32(E1000_VMOLR(vf), reg);
5214 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005215 }
5216 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005217 return 0;
5218}
5219
5220static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5221{
5222 struct e1000_hw *hw = &adapter->hw;
5223
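 /* VMVIR with VLANA_DEFAULT makes the hardware insert this VID into
  * frames the VF transmits (a "port VLAN" for the VF); writing 0
  * turns the default tag off again */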
5224 if (vid)
5225 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5226 else
5227 wr32(E1000_VMVIR(vf), 0);
5228}
5229
5230static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5231 int vf, u16 vlan, u8 qos)
5232{
5233 int err = 0;
5234 struct igb_adapter *adapter = netdev_priv(netdev);
5235
5236 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5237 return -EINVAL;
5238 if (vlan || qos) {
5239 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5240 if (err)
5241 goto out;
5242 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5243 igb_set_vmolr(adapter, vf, !vlan);
5244 adapter->vf_data[vf].pf_vlan = vlan;
5245 adapter->vf_data[vf].pf_qos = qos;
5246 dev_info(&adapter->pdev->dev,
5247 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5248 if (test_bit(__IGB_DOWN, &adapter->state)) {
5249 dev_warn(&adapter->pdev->dev,
5250 "The VF VLAN has been set,"
5251 " but the PF device is not up.\n");
5252 dev_warn(&adapter->pdev->dev,
5253 "Bring the PF device up before"
5254 " attempting to use the VF device.\n");
5255 }
5256 } else {
5257 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5258 false, vf);
5259 igb_set_vmvir(adapter, vlan, vf);
5260 igb_set_vmolr(adapter, vf, true);
5261 adapter->vf_data[vf].pf_vlan = 0;
5262 adapter->vf_data[vf].pf_qos = 0;
5263 }
5264out:
5265 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005266}
5267
5268static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5269{
5270 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5271 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5272
5273 return igb_vlvf_set(adapter, vid, add, vf);
5274}
5275
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005276static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005277{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005278 /* clear flags - except flag that indicates PF has set the MAC */
5279 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005280 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005281
5282 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005283 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005284
5285 /* reset vlans for device */
5286 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005287 if (adapter->vf_data[vf].pf_vlan)
5288 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5289 adapter->vf_data[vf].pf_vlan,
5290 adapter->vf_data[vf].pf_qos);
5291 else
5292 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005293
5294 /* reset multicast table array for vf */
5295 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5296
5297 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005298 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005299}
5300
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005301static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5302{
5303 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5304
5305 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005306 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5307 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005308
5309 /* process remaining reset events */
5310 igb_vf_reset(adapter, vf);
5311}
5312
5313static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005314{
5315 struct e1000_hw *hw = &adapter->hw;
5316 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005317 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005318 u32 reg, msgbuf[3];
5319 u8 *addr = (u8 *)(&msgbuf[1]);
5320
5321 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005322 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005323
5324 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005325 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005326
5327 /* enable transmit and receive for vf */
5328 reg = rd32(E1000_VFTE);
5329 wr32(E1000_VFTE, reg | (1 << vf));
5330 reg = rd32(E1000_VFRE);
5331 wr32(E1000_VFRE, reg | (1 << vf));
5332
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005333 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005334
5335 /* reply to reset with ack and vf mac address */
5336 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5337 memcpy(addr, vf_mac, 6);
5338 igb_write_mbx(hw, msgbuf, 3, vf);
5339}
5340
5341static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5342{
Greg Rosede42edd2010-07-01 13:39:23 +00005343 /*
5344 * The VF MAC Address is stored in a packed array of bytes
5345 * starting at the second 32 bit word of the msg array
5346 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005347	unsigned char *addr = (unsigned char *)&msg[1];
5348 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005349
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005350 if (is_valid_ether_addr(addr))
5351 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005352
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005353 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005354}
5355
5356static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5357{
5358 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005359 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005360 u32 msg = E1000_VT_MSGTYPE_NACK;
5361
5362 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005363 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5364 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005365 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005366 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005367 }
5368}
5369
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005370static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005371{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005372 struct pci_dev *pdev = adapter->pdev;
5373 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005374 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005375 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005376 s32 retval;
5377
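 /* mailbox protocol in brief: the low 16 bits of word 0 select the
  * request, the PF replies by OR-ing E1000_VT_MSGTYPE_ACK or _NACK
  * into that same word, and the CTS flag marks a VF that has
  * completed its reset handshake and may configure itself */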
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005378 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005379
Alexander Duyckfef45f42009-12-11 22:57:34 -08005380 if (retval) {
5381 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005382 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005383 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5384 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5385 return;
5386 goto out;
5387 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005388
5389 /* this is a message we already processed, do nothing */
5390 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005391 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005392
5393 /*
5394 * until the vf completes a reset it should not be
5395 * allowed to start any configuration.
5396 */
5397
5398 if (msgbuf[0] == E1000_VF_RESET) {
5399 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005400 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005401 }
5402
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005403 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005404 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5405 return;
5406 retval = -1;
5407 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005408 }
5409
 5410	switch (msgbuf[0] & 0xFFFF) {
5411 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005412 retval = -EINVAL;
5413 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5414 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5415 else
5416 dev_warn(&pdev->dev,
5417 "VF %d attempted to override administratively "
5418 "set MAC address\nReload the VF driver to "
5419 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005420 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005421 case E1000_VF_SET_PROMISC:
5422 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5423 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005424 case E1000_VF_SET_MULTICAST:
5425 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5426 break;
5427 case E1000_VF_SET_LPE:
5428 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5429 break;
5430 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005431 retval = -1;
5432 if (vf_data->pf_vlan)
5433 dev_warn(&pdev->dev,
5434 "VF %d attempted to override administratively "
5435 "set VLAN tag\nReload the VF driver to "
5436 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005437 else
5438 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005439 break;
5440 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005441 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005442 retval = -1;
5443 break;
5444 }
5445
Alexander Duyckfef45f42009-12-11 22:57:34 -08005446 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5447out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005448 /* notify the VF of the results of what it sent us */
5449 if (retval)
5450 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5451 else
5452 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5453
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005454 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005455}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005456
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005457static void igb_msg_task(struct igb_adapter *adapter)
5458{
5459 struct e1000_hw *hw = &adapter->hw;
5460 u32 vf;
5461
5462 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5463 /* process any reset requests */
5464 if (!igb_check_for_rst(hw, vf))
5465 igb_vf_reset_event(adapter, vf);
5466
5467 /* process any messages pending */
5468 if (!igb_check_for_msg(hw, vf))
5469 igb_rcv_msg_from_vf(adapter, vf);
5470
5471 /* process any acks */
5472 if (!igb_check_for_ack(hw, vf))
5473 igb_rcv_ack_from_vf(adapter, vf);
5474 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005475}
5476
Auke Kok9d5c8242008-01-24 02:22:38 -08005477/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005478 * igb_set_uta - Set unicast filter table address
5479 * @adapter: board private structure
5480 *
5481 * The unicast table address is a register array of 32-bit registers.
 5482 * The table is meant to be used in a way similar to how the MTA is used;
 5483 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005484 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5485 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005486 **/
5487static void igb_set_uta(struct igb_adapter *adapter)
5488{
5489 struct e1000_hw *hw = &adapter->hw;
5490 int i;
5491
5492 /* The UTA table only exists on 82576 hardware and newer */
5493 if (hw->mac.type < e1000_82576)
5494 return;
5495
5496 /* we only need to do this if VMDq is enabled */
5497 if (!adapter->vfs_allocated_count)
5498 return;
5499
5500 for (i = 0; i < hw->mac.uta_reg_count; i++)
5501 array_wr32(E1000_UTA, i, ~0);
5502}
5503
5504/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005505 * igb_intr_msi - Interrupt Handler
5506 * @irq: interrupt number
5507 * @data: pointer to a network interface device structure
5508 **/
5509static irqreturn_t igb_intr_msi(int irq, void *data)
5510{
Alexander Duyck047e0032009-10-27 15:49:27 +00005511 struct igb_adapter *adapter = data;
5512 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005513 struct e1000_hw *hw = &adapter->hw;
5514 /* read ICR disables interrupts using IAM */
5515 u32 icr = rd32(E1000_ICR);
5516
Alexander Duyck047e0032009-10-27 15:49:27 +00005517 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005518
Alexander Duyck7f081d42010-01-07 17:41:00 +00005519 if (icr & E1000_ICR_DRSTA)
5520 schedule_work(&adapter->reset_task);
5521
Alexander Duyck047e0032009-10-27 15:49:27 +00005522 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005523 /* HW is reporting DMA is out of sync */
5524 adapter->stats.doosync++;
5525 }
5526
Auke Kok9d5c8242008-01-24 02:22:38 -08005527 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5528 hw->mac.get_link_status = 1;
5529 if (!test_bit(__IGB_DOWN, &adapter->state))
5530 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5531 }
5532
Alexander Duyck047e0032009-10-27 15:49:27 +00005533 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005534
5535 return IRQ_HANDLED;
5536}
5537
5538/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005539 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005540 * @irq: interrupt number
5541 * @data: pointer to a network interface device structure
5542 **/
5543static irqreturn_t igb_intr(int irq, void *data)
5544{
Alexander Duyck047e0032009-10-27 15:49:27 +00005545 struct igb_adapter *adapter = data;
5546 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005547 struct e1000_hw *hw = &adapter->hw;
5548 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5549 * need for the IMC write */
5550 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005551
5552 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5553 * not set, then the adapter didn't send an interrupt */
5554 if (!(icr & E1000_ICR_INT_ASSERTED))
5555 return IRQ_NONE;
5556
Alexander Duyck0ba82992011-08-26 07:45:47 +00005557 igb_write_itr(q_vector);
5558
Alexander Duyck7f081d42010-01-07 17:41:00 +00005559 if (icr & E1000_ICR_DRSTA)
5560 schedule_work(&adapter->reset_task);
5561
Alexander Duyck047e0032009-10-27 15:49:27 +00005562 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005563 /* HW is reporting DMA is out of sync */
5564 adapter->stats.doosync++;
5565 }
5566
Auke Kok9d5c8242008-01-24 02:22:38 -08005567 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5568 hw->mac.get_link_status = 1;
5569 /* guard against interrupt when we're going down */
5570 if (!test_bit(__IGB_DOWN, &adapter->state))
5571 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5572 }
5573
Alexander Duyck047e0032009-10-27 15:49:27 +00005574 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005575
5576 return IRQ_HANDLED;
5577}
5578
Alexander Duyck0ba82992011-08-26 07:45:47 +00005579void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005580{
Alexander Duyck047e0032009-10-27 15:49:27 +00005581 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005582 struct e1000_hw *hw = &adapter->hw;
5583
Alexander Duyck0ba82992011-08-26 07:45:47 +00005584 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5585 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5586 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5587 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005588 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005589 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005590 }
5591
5592 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5593 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005594 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005595 else
5596 igb_irq_enable(adapter);
5597 }
5598}
5599
Auke Kok9d5c8242008-01-24 02:22:38 -08005600/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005601 * igb_poll - NAPI Rx polling callback
5602 * @napi: napi polling structure
5603 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005604 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005605static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005606{
Alexander Duyck047e0032009-10-27 15:49:27 +00005607 struct igb_q_vector *q_vector = container_of(napi,
5608 struct igb_q_vector,
5609 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005610 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005611
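 /* NAPI contract: while work remains we must return the full budget
  * to stay in polling mode; only when both Tx and Rx cleanup finish
  * early may we call napi_complete() and re-arm the interrupt */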
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005612#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005613 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5614 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005615#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005616 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005617 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005618
Alexander Duyck0ba82992011-08-26 07:45:47 +00005619 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005620 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005621
Alexander Duyck16eb8812011-08-26 07:43:54 +00005622 /* If all work not completed, return budget and keep polling */
5623 if (!clean_complete)
5624 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005625
Alexander Duyck46544252009-02-19 20:39:04 -08005626 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005627 napi_complete(napi);
5628 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005629
Alexander Duyck16eb8812011-08-26 07:43:54 +00005630 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005631}
Al Viro6d8126f2008-03-16 22:23:24 +00005632
Auke Kok9d5c8242008-01-24 02:22:38 -08005633/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005634 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005635 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005636 * @shhwtstamps: timestamp structure to update
5637 * @regval: unsigned 64bit system time value.
5638 *
5639 * We need to convert the system time value stored in the RX/TXSTMP registers
5640 * into a hwtstamp which can be used by the upper level timestamping functions
5641 */
5642static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5643 struct skb_shared_hwtstamps *shhwtstamps,
5644 u64 regval)
5645{
5646 u64 ns;
5647
Alexander Duyck55cac242009-11-19 12:42:21 +00005648 /*
5649 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5650 * 24 to match clock shift we setup earlier.
5651 */
Alexander Duyck06218a82011-08-26 07:46:55 +00005652 if (adapter->hw.mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00005653 regval <<= IGB_82580_TSYNC_SHIFT;
5654
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005655 ns = timecounter_cyc2time(&adapter->clock, regval);
5656 timecompare_update(&adapter->compare, ns);
5657 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5658 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5659 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5660}
5661
5662/**
5663 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5664 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck06034642011-08-26 07:44:22 +00005665 * @buffer_info: pointer to the igb_tx_buffer being completed
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005666 *
5667 * If we were asked to do hardware stamping and such a time stamp is
5668 * available, then it must have been for this skb here because we only
 5669 * allow one such packet into the queue.
5670 */
Alexander Duyck06034642011-08-26 07:44:22 +00005671static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5672 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005673{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005674 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005675 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005676 struct skb_shared_hwtstamps shhwtstamps;
5677 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005678
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005679 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005680 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005681 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5682 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005683
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005684 regval = rd32(E1000_TXSTMPL);
5685 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5686
5687 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005688 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005689}
5690
5691/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005692 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005693 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005694 * returns true if ring is completely cleaned
5695 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005696static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005697{
Alexander Duyck047e0032009-10-27 15:49:27 +00005698 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005699 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005700 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005701 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005702 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005703 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005704 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005705
Alexander Duyck13fde972011-10-05 13:35:24 +00005706 if (test_bit(__IGB_DOWN, &adapter->state))
5707 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005708
Alexander Duyck06034642011-08-26 07:44:22 +00005709 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005710 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005711 i -= tx_ring->count;
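 /* i is biased negative by the ring size so the wrap check in the
  * loop below is a cheap "!i" instead of a compare against count */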
Auke Kok9d5c8242008-01-24 02:22:38 -08005712
Alexander Duyck13fde972011-10-05 13:35:24 +00005713 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005714 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005715
Alexander Duyck8542db02011-08-26 07:44:43 +00005716 /* prevent any other reads prior to eop_desc */
5717 rmb();
5718
5719 /* if next_to_watch is not set then there is no work pending */
5720 if (!eop_desc)
5721 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005722
5723 /* if DD is not set pending work has not been completed */
5724 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5725 break;
5726
Alexander Duyck8542db02011-08-26 07:44:43 +00005727 /* clear next_to_watch to prevent false hangs */
5728 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005729
Alexander Duyckebe42d12011-08-26 07:45:09 +00005730 /* update the statistics for this packet */
5731 total_bytes += tx_buffer->bytecount;
5732 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005733
Alexander Duyckebe42d12011-08-26 07:45:09 +00005734 /* retrieve hardware timestamp */
5735 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005736
Alexander Duyckebe42d12011-08-26 07:45:09 +00005737 /* free the skb */
5738 dev_kfree_skb_any(tx_buffer->skb);
5739 tx_buffer->skb = NULL;
5740
5741 /* unmap skb header data */
5742 dma_unmap_single(tx_ring->dev,
5743 tx_buffer->dma,
5744 tx_buffer->length,
5745 DMA_TO_DEVICE);
5746
5747 /* clear last DMA location and unmap remaining buffers */
5748 while (tx_desc != eop_desc) {
5749 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005750
Alexander Duyck13fde972011-10-05 13:35:24 +00005751 tx_buffer++;
5752 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005753 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005754 if (unlikely(!i)) {
5755 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005756 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005757 tx_desc = IGB_TX_DESC(tx_ring, 0);
5758 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005759
5760 /* unmap any remaining paged data */
5761 if (tx_buffer->dma) {
5762 dma_unmap_page(tx_ring->dev,
5763 tx_buffer->dma,
5764 tx_buffer->length,
5765 DMA_TO_DEVICE);
5766 }
5767 }
5768
5769 /* clear last DMA location */
5770 tx_buffer->dma = 0;
5771
5772 /* move us one more past the eop_desc for start of next pkt */
5773 tx_buffer++;
5774 tx_desc++;
5775 i++;
5776 if (unlikely(!i)) {
5777 i -= tx_ring->count;
5778 tx_buffer = tx_ring->tx_buffer_info;
5779 tx_desc = IGB_TX_DESC(tx_ring, 0);
5780 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005781 }
5782
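 /* report the completed work to byte queue limits (BQL) so the stack
  * can keep the Tx queue depth right-sized */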
Eric Dumazetbdbc0632012-01-04 20:23:36 +00005783 netdev_tx_completed_queue(txring_txq(tx_ring),
5784 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00005785 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005786 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005787 u64_stats_update_begin(&tx_ring->tx_syncp);
5788 tx_ring->tx_stats.bytes += total_bytes;
5789 tx_ring->tx_stats.packets += total_packets;
5790 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005791 q_vector->tx.total_bytes += total_bytes;
5792 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005793
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005794 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005795 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005796
Alexander Duyck8542db02011-08-26 07:44:43 +00005797 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005798
Auke Kok9d5c8242008-01-24 02:22:38 -08005799 /* Detect a transmit hang in hardware, this serializes the
5800 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005801 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyck8542db02011-08-26 07:44:43 +00005802 if (eop_desc &&
5803 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005804 (adapter->tx_timeout_factor * HZ)) &&
5805 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005806
Auke Kok9d5c8242008-01-24 02:22:38 -08005807 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005808 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005809 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005810 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005811 " TDH <%x>\n"
5812 " TDT <%x>\n"
5813 " next_to_use <%x>\n"
5814 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005815 "buffer_info[next_to_clean]\n"
5816 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005817 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005818 " jiffies <%lx>\n"
5819 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005820 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005821 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005822 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005823 tx_ring->next_to_use,
5824 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005825 tx_buffer->time_stamp,
5826 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005827 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005828 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005829 netif_stop_subqueue(tx_ring->netdev,
5830 tx_ring->queue_index);
5831
5832 /* we are about to reset, no point in enabling stuff */
5833 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005834 }
5835 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005836
5837 if (unlikely(total_packets &&
5838 netif_carrier_ok(tx_ring->netdev) &&
5839 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5840 /* Make sure that anybody stopping the queue after this
5841 * sees the new next_to_clean.
5842 */
5843 smp_mb();
5844 if (__netif_subqueue_stopped(tx_ring->netdev,
5845 tx_ring->queue_index) &&
5846 !(test_bit(__IGB_DOWN, &adapter->state))) {
5847 netif_wake_subqueue(tx_ring->netdev,
5848 tx_ring->queue_index);
5849
5850 u64_stats_update_begin(&tx_ring->tx_syncp);
5851 tx_ring->tx_stats.restart_queue++;
5852 u64_stats_update_end(&tx_ring->tx_syncp);
5853 }
5854 }
5855
5856 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005857}
5858
Alexander Duyckcd392f52011-08-26 07:43:59 +00005859static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005860 union e1000_adv_rx_desc *rx_desc,
5861 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005862{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005863 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005864
Alexander Duyck294e7d72011-08-26 07:45:57 +00005865 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005866 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00005867 return;
5868
5869 /* Rx checksum disabled via ethtool */
5870 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005871 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005872
Auke Kok9d5c8242008-01-24 02:22:38 -08005873 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005874 if (igb_test_staterr(rx_desc,
5875 E1000_RXDEXT_STATERR_TCPE |
5876 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005877 /*
5878 * work around errata with sctp packets where the TCPE aka
5879 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5880 * packets, (aka let the stack check the crc32c)
5881 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005882 if (!((skb->len == 60) &&
5883 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005884 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005885 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005886 u64_stats_update_end(&ring->rx_syncp);
5887 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005888 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005889 return;
5890 }
5891 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005892 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5893 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08005894 skb->ip_summed = CHECKSUM_UNNECESSARY;
5895
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005896 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5897 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08005898}
5899
Alexander Duyck077887c2011-08-26 07:46:29 +00005900static inline void igb_rx_hash(struct igb_ring *ring,
5901 union e1000_adv_rx_desc *rx_desc,
5902 struct sk_buff *skb)
5903{
5904 if (ring->netdev->features & NETIF_F_RXHASH)
5905 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5906}
5907
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005908static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5909 union e1000_adv_rx_desc *rx_desc,
5910 struct sk_buff *skb)
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005911{
5912 struct igb_adapter *adapter = q_vector->adapter;
5913 struct e1000_hw *hw = &adapter->hw;
5914 u64 regval;
5915
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005916 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
5917 E1000_RXDADV_STAT_TS))
5918 return;
5919
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005920 /*
5921 * If this bit is set, then the RX registers contain the time stamp. No
5922 * other packet will be time stamped until we read these registers, so
5923 * read the registers to make them available again. Because only one
5924 * packet can be time stamped at a time, we know that the register
5925 * values must belong to this one here and therefore we don't need to
5926 * compare any of the additional attributes stored for it.
5927 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005928 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005929 * can turn into a skb_shared_hwtstamps.
5930 */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005931 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
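 /* TSIP: MACs that support it (82580 and later) can prepend the
  * timestamp to the packet data itself; read it from there and pull
  * the IGB_TS_HDR_LEN bytes off so the stack never sees them */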
Nick Nunley757b77e2010-03-26 11:36:47 +00005932 u32 *stamp = (u32 *)skb->data;
5933 regval = le32_to_cpu(*(stamp + 2));
5934 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5935 skb_pull(skb, IGB_TS_HDR_LEN);
5936 } else {
 5937		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5938 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005939
Nick Nunley757b77e2010-03-26 11:36:47 +00005940 regval = rd32(E1000_RXSTMPL);
5941 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5942 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005943
5944 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5945}
Alexander Duyck8be10e92011-08-26 07:47:11 +00005946
5947static void igb_rx_vlan(struct igb_ring *ring,
5948 union e1000_adv_rx_desc *rx_desc,
5949 struct sk_buff *skb)
5950{
5951 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5952 u16 vid;
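 /* some MACs store the VLAN tag of a locally looped-back (VM-to-VM)
  * packet in network order, hence the flag-gated be16_to_cpu() */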
5953 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5954 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5955 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5956 else
5957 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5958
5959 __vlan_hwaccel_put_tag(skb, vid);
5960 }
5961}
5962
Alexander Duyck44390ca2011-08-26 07:43:38 +00005963static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005964{
5965 /* HW will not DMA in data larger than the given buffer, even if it
5966 * parses the (NFS, of course) header to be larger. In that case, it
5967 * fills the header buffer and spills the rest into the page.
5968 */
5969 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5970 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00005971 if (hlen > IGB_RX_HDR_LEN)
5972 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005973 return hlen;
5974}
5975
Alexander Duyckcd392f52011-08-26 07:43:59 +00005976static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005977{
Alexander Duyck0ba82992011-08-26 07:45:47 +00005978 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005979 union e1000_adv_rx_desc *rx_desc;
5980 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005981 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005982 u16 cleaned_count = igb_desc_unused(rx_ring);
5983 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005984
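 /* this ring uses packet split: each descriptor pairs a small header
  * buffer (up to IGB_RX_HDR_LEN bytes, mapped into the skb linear
  * area) with a half-page data buffer attached as a page fragment */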
Alexander Duyck601369062011-08-26 07:44:05 +00005985 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005986
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005987 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
Alexander Duyck06034642011-08-26 07:44:22 +00005988 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00005989 struct sk_buff *skb = buffer_info->skb;
5990 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005991
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005992 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005993 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005994
5995 i++;
5996 if (i == rx_ring->count)
5997 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005998
Alexander Duyck601369062011-08-26 07:44:05 +00005999 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006000 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006001
Alexander Duyck16eb8812011-08-26 07:43:54 +00006002 /*
6003 * This memory barrier is needed to keep us from reading
6004 * any other fields out of the rx_desc until we know the
6005 * RXD_STAT_DD bit is set
6006 */
6007 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006008
Alexander Duyck16eb8812011-08-26 07:43:54 +00006009 if (!skb_is_nonlinear(skb)) {
6010 __skb_put(skb, igb_get_hlen(rx_desc));
6011 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00006012 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00006013 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00006014 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006015 }
6016
Alexander Duyck16eb8812011-08-26 07:43:54 +00006017 if (rx_desc->wb.upper.length) {
6018 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006019
Koki Sanagiaa913402010-04-27 01:01:19 +00006020 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006021 buffer_info->page,
6022 buffer_info->page_offset,
6023 length);
6024
Alexander Duyck16eb8812011-08-26 07:43:54 +00006025 skb->len += length;
6026 skb->data_len += length;
Eric Dumazet95b9c1d2011-10-13 07:56:41 +00006027 skb->truesize += PAGE_SIZE / 2;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006028
Alexander Duyckd1eff352009-11-12 18:38:35 +00006029 if ((page_count(buffer_info->page) != 1) ||
6030 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006031 buffer_info->page = NULL;
6032 else
6033 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08006034
Alexander Duyck16eb8812011-08-26 07:43:54 +00006035 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
6036 PAGE_SIZE / 2, DMA_FROM_DEVICE);
6037 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006038 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006039
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006040 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006041 struct igb_rx_buffer *next_buffer;
6042 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08006043 buffer_info->skb = next_buffer->skb;
6044 buffer_info->dma = next_buffer->dma;
6045 next_buffer->skb = skb;
6046 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006047 goto next_desc;
6048 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00006049
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006050 if (igb_test_staterr(rx_desc,
6051 E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00006052 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006053 goto next_desc;
6054 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006055
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006056 igb_rx_hwtstamp(q_vector, rx_desc, skb);
Alexander Duyck077887c2011-08-26 07:46:29 +00006057 igb_rx_hash(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006058 igb_rx_checksum(rx_ring, rx_desc, skb);
Alexander Duyck8be10e92011-08-26 07:47:11 +00006059 igb_rx_vlan(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006060
6061 total_bytes += skb->len;
6062 total_packets++;
6063
6064 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6065
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006066 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006067
Alexander Duyck16eb8812011-08-26 07:43:54 +00006068 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08006069next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00006070 if (!budget)
6071 break;
6072
6073 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006074 /* return some buffers to hardware, one at a time is too slow */
6075 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00006076 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006077 cleaned_count = 0;
6078 }
6079
6080 /* use prefetched values */
6081 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006082 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006083
Auke Kok9d5c8242008-01-24 02:22:38 -08006084 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006085 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006086 rx_ring->rx_stats.packets += total_packets;
6087 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006088 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006089 q_vector->rx.total_packets += total_packets;
6090 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006091
6092 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006093 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006094
Alexander Duyck16eb8812011-08-26 07:43:54 +00006095 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006096}
6097
Alexander Duyckc023cd82011-08-26 07:43:43 +00006098static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006099 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006100{
6101 struct sk_buff *skb = bi->skb;
6102 dma_addr_t dma = bi->dma;
6103
6104 if (dma)
6105 return true;
6106
6107 if (likely(!skb)) {
6108 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6109 IGB_RX_HDR_LEN);
6110 bi->skb = skb;
6111 if (!skb) {
6112 rx_ring->rx_stats.alloc_failed++;
6113 return false;
6114 }
6115
6116 /* initialize skb for ring */
6117 skb_record_rx_queue(skb, rx_ring->queue_index);
6118 }
6119
6120 dma = dma_map_single(rx_ring->dev, skb->data,
6121 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6122
6123 if (dma_mapping_error(rx_ring->dev, dma)) {
6124 rx_ring->rx_stats.alloc_failed++;
6125 return false;
6126 }
6127
6128 bi->dma = dma;
6129 return true;
6130}
6131
6132static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006133 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006134{
6135 struct page *page = bi->page;
6136 dma_addr_t page_dma = bi->page_dma;
6137 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6138
6139 if (page_dma)
6140 return true;
6141
6142 if (!page) {
Eric Dumazet1f2149c2011-11-22 10:57:41 +00006143 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006144 bi->page = page;
6145 if (unlikely(!page)) {
6146 rx_ring->rx_stats.alloc_failed++;
6147 return false;
6148 }
6149 }
6150
6151 page_dma = dma_map_page(rx_ring->dev, page,
6152 page_offset, PAGE_SIZE / 2,
6153 DMA_FROM_DEVICE);
6154
6155 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6156 rx_ring->rx_stats.alloc_failed++;
6157 return false;
6158 }
6159
6160 bi->page_dma = page_dma;
6161 bi->page_offset = page_offset;
6162 return true;
6163}
6164
Auke Kok9d5c8242008-01-24 02:22:38 -08006165/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006166 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08006167 * @rx_ring: rx descriptor ring to refill
 * @cleaned_count: number of descriptors to allocate buffers for
6168 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006169void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006170{
Auke Kok9d5c8242008-01-24 02:22:38 -08006171 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006172 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006173 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006174
Alexander Duyck601369062011-08-26 07:44:05 +00006175 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006176 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006177 i -= rx_ring->count;
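 /* same negative-bias trick as igb_clean_tx_irq(): makes the wrap
  * test below a simple "!i" */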
Auke Kok9d5c8242008-01-24 02:22:38 -08006178
6179 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006180 if (!igb_alloc_mapped_skb(rx_ring, bi))
6181 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006182
Alexander Duyckc023cd82011-08-26 07:43:43 +00006183 /* Refresh the desc even if buffer_addrs didn't change
6184 * because each write-back erases this info. */
6185 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006186
Alexander Duyckc023cd82011-08-26 07:43:43 +00006187 if (!igb_alloc_mapped_page(rx_ring, bi))
6188 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006189
Alexander Duyckc023cd82011-08-26 07:43:43 +00006190 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006191
Alexander Duyckc023cd82011-08-26 07:43:43 +00006192 rx_desc++;
6193 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006194 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006195 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006196 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006197 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006198 i -= rx_ring->count;
6199 }
6200
6201 /* clear the hdr_addr for the next_to_use descriptor */
6202 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006203 }
6204
Alexander Duyckc023cd82011-08-26 07:43:43 +00006205 i += rx_ring->count;
6206
Auke Kok9d5c8242008-01-24 02:22:38 -08006207 if (rx_ring->next_to_use != i) {
6208 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006209
6210 /* Force memory writes to complete before letting h/w
6211 * know there are new descriptors to fetch. (Only
6212 * applicable for weak-ordered memory model archs,
6213 * such as IA-64). */
6214 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006215 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006216 }
6217}
6218
6219/**
 6220 * igb_mii_ioctl - read PHY registers via the MII ioctl interface
 6221 * @netdev: network interface device structure
 6222 * @ifr: pointer to the user ifreq carrying the MII request
 6223 * @cmd: SIOCGMIIPHY or SIOCGMIIREG (SIOCSMIIREG is rejected)
6224 **/
6225static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6226{
6227 struct igb_adapter *adapter = netdev_priv(netdev);
6228 struct mii_ioctl_data *data = if_mii(ifr);
6229
6230 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6231 return -EOPNOTSUPP;
6232
6233 switch (cmd) {
6234 case SIOCGMIIPHY:
6235 data->phy_id = adapter->hw.phy.addr;
6236 break;
6237 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006238 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6239 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006240 return -EIO;
6241 break;
6242 case SIOCSMIIREG:
6243 default:
6244 return -EOPNOTSUPP;
6245 }
6246 return 0;
6247}
6248
6249/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006250 * igb_hwtstamp_ioctl - control hardware time stamping
 6251 * @netdev: network interface device structure
 6252 * @ifr: pointer to the user ifreq carrying a hwtstamp_config
 6253 * @cmd: SIOCSHWTSTAMP
6254 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006255 * Outgoing time stamping can be enabled and disabled. Play nice and
6256 * disable it when requested, although it shouldn't case any overhead
6257 * when no packet needs it. At most one packet in the queue may be
6258 * marked for time stamping, otherwise it would be impossible to tell
6259 * for sure to which packet the hardware time stamp belongs.
6260 *
6261 * Incoming time stamping has to be configured via the hardware
6262 * filters. Not all combinations are supported, in particular event
6263 * type has to be specified. Matching the kind of event packet is
6264 * not supported, with the exception of "all V2 events regardless of
6265 * level 2 or 4".
6266 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006267 **/
6268static int igb_hwtstamp_ioctl(struct net_device *netdev,
6269 struct ifreq *ifr, int cmd)
6270{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006271 struct igb_adapter *adapter = netdev_priv(netdev);
6272 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006273 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006274 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6275 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006276 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006277 bool is_l4 = false;
6278 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006279 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006280
6281 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6282 return -EFAULT;
6283
6284 /* reserved for future extensions */
6285 if (config.flags)
6286 return -EINVAL;
6287
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006288 switch (config.tx_type) {
6289 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006290 tsync_tx_ctl = 0;
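 /* fall through - OFF only clears the enable bit first */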
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006291 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006292 break;
6293 default:
6294 return -ERANGE;
6295 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006296
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006297 switch (config.rx_filter) {
6298 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006299 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006300 break;
6301 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6302 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6303 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6304 case HWTSTAMP_FILTER_ALL:
6305 /*
6306 * register TSYNCRXCFG must be set, therefore it is not
6307 * possible to time stamp both Sync and Delay_Req messages
6308 * => fall back to time stamping all packets
6309 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006310 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006311 config.rx_filter = HWTSTAMP_FILTER_ALL;
6312 break;
6313 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006314 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006315 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006316 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006317 break;
6318 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006319 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006320 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006321 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006322 break;
6323 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6324 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006325 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006326 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006327 is_l2 = true;
6328 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006329 config.rx_filter = HWTSTAMP_FILTER_SOME;
6330 break;
6331 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6332 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006333 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006334 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006335 is_l2 = true;
6336 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006337 config.rx_filter = HWTSTAMP_FILTER_SOME;
6338 break;
6339 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6340 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6341 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006342 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006343 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006344 is_l2 = true;
Jacob Keller11ba69e2011-10-12 00:51:54 +00006345 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006346 break;
6347 default:
6348 return -ERANGE;
6349 }
6350
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006351 if (hw->mac.type == e1000_82575) {
6352		if (tsync_rx_ctl || tsync_tx_ctl)
6353 return -EINVAL;
6354 return 0;
6355 }
6356
Nick Nunley757b77e2010-03-26 11:36:47 +00006357 /*
6358 * Per-packet timestamping only works if all packets are
6359	 * timestamped, so enable timestamping on all packets as
6360	 * long as an rx filter was configured.
6361 */
Alexander Duyck06218a82011-08-26 07:46:55 +00006362 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
Nick Nunley757b77e2010-03-26 11:36:47 +00006363 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6364 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6365 }
6366
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006367 /* enable/disable TX */
6368 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006369 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6370 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006371 wr32(E1000_TSYNCTXCTL, regval);
6372
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006373 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006374 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006375 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6376 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006377 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006378
6379 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006380 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6381
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006382 /* define ethertype filter for timestamped packets */
6383 if (is_l2)
6384 wr32(E1000_ETQF(3),
6385 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6386 E1000_ETQF_1588 | /* enable timestamping */
6387 ETH_P_1588)); /* 1588 eth protocol type */
6388 else
6389 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006390
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006391#define PTP_PORT 319
6392 /* L4 Queue Filter[3]: filter by destination port and protocol */
6393 if (is_l4) {
6394 u32 ftqf = (IPPROTO_UDP /* UDP */
6395 | E1000_FTQF_VF_BP /* VF not compared */
6396 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6397 | E1000_FTQF_MASK); /* mask all inputs */
6398 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006399
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006400 wr32(E1000_IMIR(3), htons(PTP_PORT));
6401 wr32(E1000_IMIREXT(3),
6402 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6403 if (hw->mac.type == e1000_82576) {
6404 /* enable source port check */
6405 wr32(E1000_SPQF(3), htons(PTP_PORT));
6406 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6407 }
6408 wr32(E1000_FTQF(3), ftqf);
6409 } else {
6410 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6411 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006412 wrfl();
6413
6414 adapter->hwtstamp_config = config;
6415
6416 /* clear TX/RX time stamp registers, just to be sure */
6417 regval = rd32(E1000_TXSTMPH);
6418 regval = rd32(E1000_RXSTMPH);
6419
6420 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6421 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006422}
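/*
 * Illustrative userspace counterpart (not part of the driver): a minimal
 * sketch of how an application might exercise the SIOCSHWTSTAMP path
 * handled above. The interface name, filter choice and function name are
 * example values; error handling is reduced to perror() for brevity.
 */
#if 0	/* example only, not built with the driver */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_hwtstamp(const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* stamp outgoing PTP */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* all V2 events */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the driver may rewrite rx_filter, e.g. to HWTSTAMP_FILTER_ALL */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}
#endif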
6423
6424/**
Auke Kok9d5c8242008-01-24 02:22:38 -08006425 * igb_ioctl - handle ioctl requests for the device
6426 * @netdev: pointer to the net device structure
6427 * @ifr: interface request structure
6428 * @cmd: ioctl command to execute
6429 **/
6430static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6431{
6432 switch (cmd) {
6433 case SIOCGMIIPHY:
6434 case SIOCGMIIREG:
6435 case SIOCSMIIREG:
6436 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006437 case SIOCSHWTSTAMP:
6438 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006439 default:
6440 return -EOPNOTSUPP;
6441 }
6442}
6443
Alexander Duyck009bc062009-07-23 18:08:35 +00006444s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6445{
6446 struct igb_adapter *adapter = hw->back;
6447 u16 cap_offset;
6448
Jon Masonbdaae042011-06-27 07:44:01 +00006449 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006450 if (!cap_offset)
6451 return -E1000_ERR_CONFIG;
6452
6453 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6454
6455 return 0;
6456}
6457
6458s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6459{
6460 struct igb_adapter *adapter = hw->back;
6461 u16 cap_offset;
6462
Jon Masonbdaae042011-06-27 07:44:01 +00006463 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006464 if (!cap_offset)
6465 return -E1000_ERR_CONFIG;
6466
6467 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6468
6469 return 0;
6470}
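/*
 * Usage sketch (illustrative, assuming the standard PCI_EXP_* offsets from
 * <linux/pci_regs.h>): callers read a register in the PCIe capability block
 * through the helper above, e.g.
 *
 *	u16 lnksta;
 *
 *	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_LNKSTA, &lnksta))
 *		negotiated_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
 *
 * Both helpers return -E1000_ERR_CONFIG when no PCIe capability was found.
 */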
6471
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006472static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006473{
6474 struct igb_adapter *adapter = netdev_priv(netdev);
6475 struct e1000_hw *hw = &adapter->hw;
6476 u32 ctrl, rctl;
Alexander Duyck5faf0302011-08-26 07:46:08 +00006477 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08006478
Alexander Duyck5faf0302011-08-26 07:46:08 +00006479 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006480 /* enable VLAN tag insert/strip */
6481 ctrl = rd32(E1000_CTRL);
6482 ctrl |= E1000_CTRL_VME;
6483 wr32(E1000_CTRL, ctrl);
6484
Alexander Duyck51466232009-10-27 23:47:35 +00006485 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006486 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006487 rctl &= ~E1000_RCTL_CFIEN;
6488 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006489 } else {
6490 /* disable VLAN tag insert/strip */
6491 ctrl = rd32(E1000_CTRL);
6492 ctrl &= ~E1000_CTRL_VME;
6493 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006494 }
6495
Alexander Duycke1739522009-02-19 20:39:44 -08006496 igb_rlpml_set(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006497}
6498
Jiri Pirko8e586132011-12-08 19:52:37 -05006499static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006500{
6501 struct igb_adapter *adapter = netdev_priv(netdev);
6502 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006503 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006504
Alexander Duyck51466232009-10-27 23:47:35 +00006505 /* attempt to add filter to vlvf array */
6506 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006507
Alexander Duyck51466232009-10-27 23:47:35 +00006508 /* add the filter since PF can receive vlans w/o entry in vlvf */
6509 igb_vfta_set(hw, vid, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006510
6511 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006512
6513 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006514}
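/*
 * Usage note (illustrative): this ndo runs when a VLAN is registered on the
 * device, typically from iproute2, e.g.
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *
 * where the interface name and VLAN id are example values only.
 */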
6515
Jiri Pirko8e586132011-12-08 19:52:37 -05006516static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006517{
6518 struct igb_adapter *adapter = netdev_priv(netdev);
6519 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006520 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006521 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006522
Alexander Duyck51466232009-10-27 23:47:35 +00006523 /* remove vlan from VLVF table array */
6524 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006525
Alexander Duyck51466232009-10-27 23:47:35 +00006526 /* if vid was not present in VLVF just remove it from table */
6527 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006528 igb_vfta_set(hw, vid, false);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006529
6530 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006531
6532 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006533}
6534
6535static void igb_restore_vlan(struct igb_adapter *adapter)
6536{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006537 u16 vid;
Auke Kok9d5c8242008-01-24 02:22:38 -08006538
Alexander Duyck5faf0302011-08-26 07:46:08 +00006539 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6540
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006541 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6542 igb_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08006543}
6544
David Decotigny14ad2512011-04-27 18:32:43 +00006545int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08006546{
Alexander Duyck090b1792009-10-27 23:51:55 +00006547 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08006548 struct e1000_mac_info *mac = &adapter->hw.mac;
6549
6550 mac->autoneg = 0;
6551
David Decotigny14ad2512011-04-27 18:32:43 +00006552 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6553 * for the switch() below to work */
6554 if ((spd & 1) || (dplx & ~1))
6555 goto err_inval;
6556
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006557	/* Fiber NICs only allow 1000 Mbps full duplex */
6558	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
David Decotigny14ad2512011-04-27 18:32:43 +00006559	    (spd != SPEED_1000 ||
6560	     dplx != DUPLEX_FULL))
6561 goto err_inval;
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006562
David Decotigny14ad2512011-04-27 18:32:43 +00006563 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006564 case SPEED_10 + DUPLEX_HALF:
6565 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6566 break;
6567 case SPEED_10 + DUPLEX_FULL:
6568 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6569 break;
6570 case SPEED_100 + DUPLEX_HALF:
6571 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6572 break;
6573 case SPEED_100 + DUPLEX_FULL:
6574 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6575 break;
6576 case SPEED_1000 + DUPLEX_FULL:
6577 mac->autoneg = 1;
6578 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6579 break;
6580 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6581 default:
David Decotigny14ad2512011-04-27 18:32:43 +00006582 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08006583 }
6584 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00006585
6586err_inval:
6587 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6588 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08006589}
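/*
 * Illustration (not driver code): SPEED_10/100/1000 are 10, 100 and 1000,
 * all even, while DUPLEX_HALF/DUPLEX_FULL are 0/1, so the spd + dplx sums
 * in the switch above are unique per setting: 10, 11, 100 and 101 select
 * the forced 10/100 modes, 1001 selects 1000/full, and 1000 (1000/half)
 * falls through to the unsupported default case.
 */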
6590
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006591static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
Auke Kok9d5c8242008-01-24 02:22:38 -08006592{
6593 struct net_device *netdev = pci_get_drvdata(pdev);
6594 struct igb_adapter *adapter = netdev_priv(netdev);
6595 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07006596 u32 ctrl, rctl, status;
Auke Kok9d5c8242008-01-24 02:22:38 -08006597 u32 wufc = adapter->wol;
6598#ifdef CONFIG_PM
6599 int retval = 0;
6600#endif
6601
6602 netif_device_detach(netdev);
6603
Alexander Duycka88f10e2008-07-08 15:13:38 -07006604 if (netif_running(netdev))
6605 igb_close(netdev);
6606
Alexander Duyck047e0032009-10-27 15:49:27 +00006607 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006608
6609#ifdef CONFIG_PM
6610 retval = pci_save_state(pdev);
6611 if (retval)
6612 return retval;
6613#endif
6614
6615 status = rd32(E1000_STATUS);
6616 if (status & E1000_STATUS_LU)
6617 wufc &= ~E1000_WUFC_LNKC;
6618
6619 if (wufc) {
6620 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006621 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006622
6623 /* turn on all-multi mode if wake on multicast is enabled */
6624 if (wufc & E1000_WUFC_MC) {
6625 rctl = rd32(E1000_RCTL);
6626 rctl |= E1000_RCTL_MPE;
6627 wr32(E1000_RCTL, rctl);
6628 }
6629
6630 ctrl = rd32(E1000_CTRL);
6631 /* advertise wake from D3Cold */
6632 #define E1000_CTRL_ADVD3WUC 0x00100000
6633 /* phy power management enable */
6634 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6635 ctrl |= E1000_CTRL_ADVD3WUC;
6636 wr32(E1000_CTRL, ctrl);
6637
Auke Kok9d5c8242008-01-24 02:22:38 -08006638 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00006639 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08006640
6641 wr32(E1000_WUC, E1000_WUC_PME_EN);
6642 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08006643 } else {
6644 wr32(E1000_WUC, 0);
6645 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08006646 }
6647
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006648 *enable_wake = wufc || adapter->en_mng_pt;
6649 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00006650 igb_power_down_link(adapter);
6651 else
6652 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006653
6654 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6655 * would have already happened in close and is redundant. */
6656 igb_release_hw_control(adapter);
6657
6658 pci_disable_device(pdev);
6659
Auke Kok9d5c8242008-01-24 02:22:38 -08006660 return 0;
6661}
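/*
 * Usage note (illustrative): the wufc wake-up filters consumed above are
 * normally configured from userspace through ethtool, e.g.
 *
 *	ethtool -s eth0 wol g
 *
 * requests magic-packet wake, which the driver's ethtool hooks record in
 * adapter->wol before this shutdown path runs. "eth0" is an example name.
 */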
6662
6663#ifdef CONFIG_PM
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006664static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
6665{
6666 int retval;
6667 bool wake;
6668
6669 retval = __igb_shutdown(pdev, &wake);
6670 if (retval)
6671 return retval;
6672
6673 if (wake) {
6674 pci_prepare_to_sleep(pdev);
6675 } else {
6676 pci_wake_from_d3(pdev, false);
6677 pci_set_power_state(pdev, PCI_D3hot);
6678 }
6679
6680 return 0;
6681}
6682
Auke Kok9d5c8242008-01-24 02:22:38 -08006683static int igb_resume(struct pci_dev *pdev)
6684{
6685 struct net_device *netdev = pci_get_drvdata(pdev);
6686 struct igb_adapter *adapter = netdev_priv(netdev);
6687 struct e1000_hw *hw = &adapter->hw;
6688 u32 err;
6689
6690 pci_set_power_state(pdev, PCI_D0);
6691 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006692 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006693
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006694 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006695 if (err) {
6696 dev_err(&pdev->dev,
6697 "igb: Cannot enable PCI device from suspend\n");
6698 return err;
6699 }
6700 pci_set_master(pdev);
6701
6702 pci_enable_wake(pdev, PCI_D3hot, 0);
6703 pci_enable_wake(pdev, PCI_D3cold, 0);
6704
Alexander Duyck047e0032009-10-27 15:49:27 +00006705 if (igb_init_interrupt_scheme(adapter)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07006706 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6707 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08006708 }
6709
Auke Kok9d5c8242008-01-24 02:22:38 -08006710 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00006711
6712 /* let the f/w know that the h/w is now under the control of the
6713 * driver. */
6714 igb_get_hw_control(adapter);
6715
Auke Kok9d5c8242008-01-24 02:22:38 -08006716 wr32(E1000_WUS, ~0);
6717
Alexander Duycka88f10e2008-07-08 15:13:38 -07006718 if (netif_running(netdev)) {
6719 err = igb_open(netdev);
6720 if (err)
6721 return err;
6722 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006723
6724 netif_device_attach(netdev);
6725
Auke Kok9d5c8242008-01-24 02:22:38 -08006726 return 0;
6727}
6728#endif
6729
6730static void igb_shutdown(struct pci_dev *pdev)
6731{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006732 bool wake;
6733
6734 __igb_shutdown(pdev, &wake);
6735
6736 if (system_state == SYSTEM_POWER_OFF) {
6737 pci_wake_from_d3(pdev, wake);
6738 pci_set_power_state(pdev, PCI_D3hot);
6739 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006740}
6741
6742#ifdef CONFIG_NET_POLL_CONTROLLER
6743/*
6744 * Polling 'interrupt' - used by things like netconsole to send skbs
6745 * without having to re-enable interrupts. It's not called while
6746 * the interrupt routine is executing.
6747 */
6748static void igb_netpoll(struct net_device *netdev)
6749{
6750 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006751 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00006752 struct igb_q_vector *q_vector;
Auke Kok9d5c8242008-01-24 02:22:38 -08006753 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006754
Alexander Duyck047e0032009-10-27 15:49:27 +00006755 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00006756 q_vector = adapter->q_vector[i];
6757 if (adapter->msix_entries)
6758 wr32(E1000_EIMC, q_vector->eims_value);
6759 else
6760 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00006761 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006762 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006763}
6764#endif /* CONFIG_NET_POLL_CONTROLLER */
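/*
 * Usage note (illustrative): igb_netpoll is exercised by netconsole, e.g.
 *
 *	modprobe netconsole netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * where the ports, addresses and interface name are example values only.
 */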
6765
6766/**
6767 * igb_io_error_detected - called when PCI error is detected
6768 * @pdev: Pointer to PCI device
6769 * @state: The current pci connection state
6770 *
6771 * This function is called after a PCI bus error affecting
6772 * this device has been detected.
6773 */
6774static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6775 pci_channel_state_t state)
6776{
6777 struct net_device *netdev = pci_get_drvdata(pdev);
6778 struct igb_adapter *adapter = netdev_priv(netdev);
6779
6780 netif_device_detach(netdev);
6781
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00006782 if (state == pci_channel_io_perm_failure)
6783 return PCI_ERS_RESULT_DISCONNECT;
6784
Auke Kok9d5c8242008-01-24 02:22:38 -08006785 if (netif_running(netdev))
6786 igb_down(adapter);
6787 pci_disable_device(pdev);
6788
6789	/* Request a slot reset. */
6790 return PCI_ERS_RESULT_NEED_RESET;
6791}
6792
6793/**
6794 * igb_io_slot_reset - called after the pci bus has been reset.
6795 * @pdev: Pointer to PCI device
6796 *
6797 * Restart the card from scratch, as if from a cold-boot. Implementation
6798 * resembles the first-half of the igb_resume routine.
6799 */
6800static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6801{
6802 struct net_device *netdev = pci_get_drvdata(pdev);
6803 struct igb_adapter *adapter = netdev_priv(netdev);
6804 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08006805 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006806 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006807
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006808 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006809 dev_err(&pdev->dev,
6810 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08006811 result = PCI_ERS_RESULT_DISCONNECT;
6812 } else {
6813 pci_set_master(pdev);
6814 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006815 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08006816
6817 pci_enable_wake(pdev, PCI_D3hot, 0);
6818 pci_enable_wake(pdev, PCI_D3cold, 0);
6819
6820 igb_reset(adapter);
6821 wr32(E1000_WUS, ~0);
6822 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08006823 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006824
Jeff Kirsherea943d42008-12-11 20:34:19 -08006825 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6826 if (err) {
6827 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6828 "failed 0x%0x\n", err);
6829 /* non-fatal, continue */
6830 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006831
Alexander Duyck40a914f2008-11-27 00:24:37 -08006832 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08006833}
6834
6835/**
6836 * igb_io_resume - called when traffic can start flowing again.
6837 * @pdev: Pointer to PCI device
6838 *
6839 * This callback is called when the error recovery driver tells us that
6840 * it's OK to resume normal operation. Implementation resembles the
6841 * second-half of the igb_resume routine.
6842 */
6843static void igb_io_resume(struct pci_dev *pdev)
6844{
6845 struct net_device *netdev = pci_get_drvdata(pdev);
6846 struct igb_adapter *adapter = netdev_priv(netdev);
6847
Auke Kok9d5c8242008-01-24 02:22:38 -08006848 if (netif_running(netdev)) {
6849 if (igb_up(adapter)) {
6850 dev_err(&pdev->dev, "igb_up failed after reset\n");
6851 return;
6852 }
6853 }
6854
6855 netif_device_attach(netdev);
6856
6857 /* let the f/w know that the h/w is now under the control of the
6858 * driver. */
6859 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006860}
6861
Alexander Duyck26ad9172009-10-05 06:32:49 +00006862static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6863 u8 qsel)
6864{
6865 u32 rar_low, rar_high;
6866 struct e1000_hw *hw = &adapter->hw;
6867
6868 /* HW expects these in little endian so we reverse the byte order
6869 * from network order (big endian) to little endian
6870 */
6871 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6872 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6873 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6874
6875 /* Indicate to hardware the Address is Valid. */
6876 rar_high |= E1000_RAH_AV;
6877
6878 if (hw->mac.type == e1000_82575)
6879 rar_high |= E1000_RAH_POOL_1 * qsel;
6880 else
6881 rar_high |= E1000_RAH_POOL_1 << qsel;
6882
6883 wr32(E1000_RAL(index), rar_low);
6884 wrfl();
6885 wr32(E1000_RAH(index), rar_high);
6886 wrfl();
6887}
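/*
 * Worked example (illustrative): for MAC address 00:11:22:33:44:55 the
 * packing above yields rar_low = 0x33221100 and rar_high = 0x5544 plus the
 * valid bit, matching the little-endian layout the hardware expects.
 */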
6888
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006889static int igb_set_vf_mac(struct igb_adapter *adapter,
6890 int vf, unsigned char *mac_addr)
6891{
6892 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006893	/* VF MAC addresses start at the end of the receive addresses and move
6894	 * towards the first; as a result a collision should not be possible */
6895 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006896
Alexander Duyck37680112009-02-19 20:40:30 -08006897 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006898
Alexander Duyck26ad9172009-10-05 06:32:49 +00006899 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006900
6901 return 0;
6902}
6903
Williams, Mitch A8151d292010-02-10 01:44:24 +00006904static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6905{
6906 struct igb_adapter *adapter = netdev_priv(netdev);
6907 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6908 return -EINVAL;
6909 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6910 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6911 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6912 " change effective.");
6913 if (test_bit(__IGB_DOWN, &adapter->state)) {
6914 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6915 " but the PF device is not up.\n");
6916 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6917 " attempting to use the VF device.\n");
6918 }
6919 return igb_set_vf_mac(adapter, vf, mac);
6920}
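/*
 * Usage note (illustrative): this ndo is reached via the rtnetlink VF API,
 * typically from iproute2, e.g.
 *
 *	ip link set dev eth0 vf 0 mac 00:11:22:33:44:55
 *
 * with example interface name, VF index and address.
 */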
6921
Lior Levy17dc5662011-02-08 02:28:46 +00006922static int igb_link_mbps(int internal_link_speed)
6923{
6924 switch (internal_link_speed) {
6925 case SPEED_100:
6926 return 100;
6927 case SPEED_1000:
6928 return 1000;
6929 default:
6930 return 0;
6931 }
6932}
6933
6934static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6935 int link_speed)
6936{
6937 int rf_dec, rf_int;
6938 u32 bcnrc_val;
6939
6940 if (tx_rate != 0) {
6941 /* Calculate the rate factor values to set */
6942 rf_int = link_speed / tx_rate;
6943 rf_dec = (link_speed - (rf_int * tx_rate));
6944 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
6945
6946 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6947 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6948 E1000_RTTBCNRC_RF_INT_MASK);
6949 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6950 } else {
6951 bcnrc_val = 0;
6952 }
6953
6954 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6955 wr32(E1000_RTTBCNRC, bcnrc_val);
6956}
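/*
 * Worked example (illustrative, assuming E1000_RTTBCNRC_RF_INT_SHIFT is the
 * 14-bit fractional shift used by this hardware): for link_speed = 1000 and
 * tx_rate = 300, rf_int = 3 and rf_dec = (1000 - 900) * 16384 / 300 = 5461,
 * so the hardware divides the link rate by roughly 3.3333, giving the VF
 * about 300 Mbps.
 */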
6957
6958static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6959{
6960 int actual_link_speed, i;
6961 bool reset_rate = false;
6962
6963 /* VF TX rate limit was not set or not supported */
6964 if ((adapter->vf_rate_link_speed == 0) ||
6965 (adapter->hw.mac.type != e1000_82576))
6966 return;
6967
6968 actual_link_speed = igb_link_mbps(adapter->link_speed);
6969 if (actual_link_speed != adapter->vf_rate_link_speed) {
6970 reset_rate = true;
6971 adapter->vf_rate_link_speed = 0;
6972 dev_info(&adapter->pdev->dev,
6973 "Link speed has been changed. VF Transmit "
6974 "rate is disabled\n");
6975 }
6976
6977 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6978 if (reset_rate)
6979 adapter->vf_data[i].tx_rate = 0;
6980
6981 igb_set_vf_rate_limit(&adapter->hw, i,
6982 adapter->vf_data[i].tx_rate,
6983 actual_link_speed);
6984 }
6985}
6986
Williams, Mitch A8151d292010-02-10 01:44:24 +00006987static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6988{
Lior Levy17dc5662011-02-08 02:28:46 +00006989 struct igb_adapter *adapter = netdev_priv(netdev);
6990 struct e1000_hw *hw = &adapter->hw;
6991 int actual_link_speed;
6992
6993 if (hw->mac.type != e1000_82576)
6994 return -EOPNOTSUPP;
6995
6996 actual_link_speed = igb_link_mbps(adapter->link_speed);
6997 if ((vf >= adapter->vfs_allocated_count) ||
6998 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6999 (tx_rate < 0) || (tx_rate > actual_link_speed))
7000 return -EINVAL;
7001
7002 adapter->vf_rate_link_speed = actual_link_speed;
7003 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
7004 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
7005
7006 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007007}
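/*
 * Usage note (illustrative): the per-VF transmit rate is normally set via
 * iproute2, e.g.
 *
 *	ip link set dev eth0 vf 0 rate 300
 *
 * where 300 is the limit in Mbps; interface and VF index are examples.
 */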
7008
7009static int igb_ndo_get_vf_config(struct net_device *netdev,
7010 int vf, struct ifla_vf_info *ivi)
7011{
7012 struct igb_adapter *adapter = netdev_priv(netdev);
7013 if (vf >= adapter->vfs_allocated_count)
7014 return -EINVAL;
7015 ivi->vf = vf;
7016 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00007017 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007018 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7019 ivi->qos = adapter->vf_data[vf].pf_qos;
7020 return 0;
7021}
7022
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007023static void igb_vmm_control(struct igb_adapter *adapter)
7024{
7025 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00007026 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007027
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007028 switch (hw->mac.type) {
7029 case e1000_82575:
7030 default:
7031 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007032 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007033 case e1000_82576:
7034 /* notify HW that the MAC is adding vlan tags */
7035 reg = rd32(E1000_DTXCTL);
7036 reg |= E1000_DTXCTL_VLAN_ADDED;
7037 wr32(E1000_DTXCTL, reg);
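		/* fall through */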
7038 case e1000_82580:
7039 /* enable replication vlan tag stripping */
7040 reg = rd32(E1000_RPLOLR);
7041 reg |= E1000_RPLOLR_STRVLAN;
7042 wr32(E1000_RPLOLR, reg);
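		/* fall through */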
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00007043 case e1000_i350:
7044 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007045 break;
7046 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00007047
Alexander Duyckd4960302009-10-27 15:53:45 +00007048 if (adapter->vfs_allocated_count) {
7049 igb_vmdq_set_loopback_pf(hw, true);
7050 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00007051 igb_vmdq_set_anti_spoofing_pf(hw, true,
7052 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00007053 } else {
7054 igb_vmdq_set_loopback_pf(hw, false);
7055 igb_vmdq_set_replication_pf(hw, false);
7056 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007057}
7058
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007059static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7060{
7061 struct e1000_hw *hw = &adapter->hw;
7062 u32 dmac_thr;
7063 u16 hwm;
7064
7065 if (hw->mac.type > e1000_82580) {
7066 if (adapter->flags & IGB_FLAG_DMAC) {
7067 u32 reg;
7068
7069 /* force threshold to 0. */
7070 wr32(E1000_DMCTXTH, 0);
7071
7072 /*
Matthew Vicke8c626e2011-11-17 08:33:12 +00007073 * DMA Coalescing high water mark needs to be greater
7074 * than the Rx threshold. Set hwm to PBA - max frame
7075			 * size in 16B units, going no lower than PBA - 6KB.
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007076 */
Matthew Vicke8c626e2011-11-17 08:33:12 +00007077 hwm = 64 * pba - adapter->max_frame_size / 16;
7078 if (hwm < 64 * (pba - 6))
7079 hwm = 64 * (pba - 6);
7080 reg = rd32(E1000_FCRTC);
7081 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
7082 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
7083 & E1000_FCRTC_RTH_COAL_MASK);
7084 wr32(E1000_FCRTC, reg);
7085
7086 /*
7087 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
7088			 * frame size, going no lower than PBA - 10KB.
7089 */
7090 dmac_thr = pba - adapter->max_frame_size / 512;
7091 if (dmac_thr < pba - 10)
7092 dmac_thr = pba - 10;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007093 reg = rd32(E1000_DMACR);
7094 reg &= ~E1000_DMACR_DMACTHR_MASK;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007095 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
7096 & E1000_DMACR_DMACTHR_MASK);
7097
7098			/* transition to L0s or L1 if available */
7099 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
7100
7101			/* watchdog timer ~= 1000 usec, programmed in 32 usec units */
7102 reg |= (1000 >> 5);
7103 wr32(E1000_DMACR, reg);
7104
7105 /*
7106			 * no lower threshold to disable
7107			 * coalescing (smart FIFO) - UTRESH=0
7108 */
7109 wr32(E1000_DMCRTRH, 0);
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007110
7111 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
7112
7113 wr32(E1000_DMCTLX, reg);
7114
7115 /*
7116 * free space in tx packet buffer to wake from
7117 * DMA coal
7118 */
7119 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7120 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7121
7122 /*
7123 * make low power state decision controlled
7124 * by DMA coal
7125 */
7126 reg = rd32(E1000_PCIEMISC);
7127 reg &= ~E1000_PCIEMISC_LX_DECISION;
7128 wr32(E1000_PCIEMISC, reg);
7129		} /* end if (adapter->flags & IGB_FLAG_DMAC) */
7130 } else if (hw->mac.type == e1000_82580) {
7131 u32 reg = rd32(E1000_PCIEMISC);
7132 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
7133 wr32(E1000_DMACR, 0);
7134 }
7135}
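/*
 * Worked example (illustrative): with pba = 34 (KB) and a 1522-byte max
 * frame, hwm = 64 * 34 - 1522 / 16 = 2081 sixteen-byte units (~32.5 KB),
 * comfortably above the floor of 64 * (34 - 6) = 1792, and
 * dmac_thr = 34 - 1522 / 512 = 32 KB, above the floor of 34 - 10 = 24.
 */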
7136
Auke Kok9d5c8242008-01-24 02:22:38 -08007137/* igb_main.c */