/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 2
#define BUILD 10
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
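/*
 * Worked example, assuming the standard __stringify() helper from
 * <linux/stringify.h>: for the MAJ/MIN/BUILD values above, DRV_VERSION
 * expands to the string literal "3.2.10-k". The two-level stringification
 * is what turns the numeric macros into strings before concatenation.
 */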
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
static int igb_suspend(struct device *);
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on the lowest register read. For the 82580,
	 * the lowest register is SYSTIMR instead of SYSTIML. However, we never
	 * adjusted TIMINCA, so SYSTIMR will just read as all 0s; ignore it.
	 */
	if (hw->mac.type >= e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

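/*
 * Illustrative sketch only, not part of the upstream driver: expands the
 * Q_IDX_82576() interleaving described in the comment above. For i = 0..7
 * it prints register indices 0, 8, 1, 9, 2, 10, 3, 11, so VF n's pair of
 * queues lands at offsets n and n + 8.
 */
static inline void igb_demo_q_idx_82576(void)
{
	int i;

	for (i = 0; i < 8; i++)
		pr_info("queue %d -> register index %d\n", i, Q_IDX_82576(i));
}
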
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset within IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer. The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

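/*
 * Illustrative sketch only, not part of the upstream driver: a worked
 * example of the IVAR math used by igb_assign_vector() below. On 82576 the
 * table is column-major, so rx_queue 10 maps to row (10 & 0x7) = 2 at bit
 * offset (10 & 0x8) << 1 = 16; on 82580/i350 it is row-major, so the same
 * queue maps to row 10 >> 1 = 5 at bit offset (10 & 0x1) << 4 = 0.
 */
static inline void igb_demo_rx_ivar(struct e1000_hw *hw, int msix_vector,
				    int rx_queue)
{
	if (hw->mac.type == e1000_82576)
		igb_write_ivar(hw, msix_vector, rx_queue & 0x7,
			       (rx_queue & 0x8) << 1);
	else
		igb_write_ivar(hw, msix_vector, rx_queue >> 1,
			       (rx_queue & 0x1) << 4);
}
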
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows. The ordering is column-major, so we use the
		 * lower 3 bits as the row index and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to the
		 * 82576; however, the table is ordered row-major rather than
		 * column-major. So we traverse it using bit 0 as the column
		 * offset and the remaining bits as the row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition, if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has no rx queues, no tx queues,
 * and no MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

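/*
 * Worked example (assumed configuration, for illustration only): with
 * rss_queues = 4, no VFs, and IGB_FLAG_QUEUE_PAIRS clear, the function
 * above reserves 4 RX + 4 TX = 8 queue vectors plus 1 for link status, so
 * it asks pci_enable_msix() for 9 entries; with queue pairing set it asks
 * for 5.
 */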
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

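/*
 * Worked example (assumed configuration, for illustration only): with 4 RX
 * and 4 TX queues, 8 q_vectors give every ring its own vector (RX on
 * vectors 0-3, TX on 4-7), while 4 q_vectors make igb_map_ring_to_vector()
 * pair tx_ring[i] and rx_ring[i] on vector i.
 */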
1221/**
1222 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1223 *
1224 * This function initializes the interrupts and allocates all of the queues.
1225 **/
1226static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1227{
1228 struct pci_dev *pdev = adapter->pdev;
1229 int err;
1230
Ben Hutchings21adef32010-09-27 08:28:39 +00001231 err = igb_set_interrupt_capability(adapter);
1232 if (err)
1233 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001234
1235 err = igb_alloc_q_vectors(adapter);
1236 if (err) {
1237 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1238 goto err_alloc_q_vectors;
1239 }
1240
1241 err = igb_alloc_queues(adapter);
1242 if (err) {
1243 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1244 goto err_alloc_queues;
1245 }
1246
1247 err = igb_map_ring_to_vector(adapter);
1248 if (err) {
1249 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1250 goto err_map_queues;
1251 }
1252
1254 return 0;
1255err_map_queues:
1256 igb_free_queues(adapter);
1257err_alloc_queues:
1258 igb_free_q_vectors(adapter);
1259err_alloc_q_vectors:
1260 igb_reset_interrupt_capability(adapter);
1261 return err;
1262}
1263
1264/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001265 * igb_request_irq - initialize interrupts
1266 * @adapter: board private structure
1267 * Attempts to configure interrupts using the best available
1268 * capabilities of the hardware and kernel.
1269 **/
1270static int igb_request_irq(struct igb_adapter *adapter)
1271{
1272 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001273 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001274 int err = 0;
1275
1276 if (adapter->msix_entries) {
1277 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001278 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001279 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001280 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001281 igb_clear_interrupt_scheme(adapter);
Alexander Duyckc74d5882011-08-26 07:46:45 +00001282 if (!pci_enable_msi(pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001283 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001284 igb_free_all_tx_resources(adapter);
1285 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001286 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001287 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001288 adapter->num_q_vectors = 1;
1289 err = igb_alloc_q_vectors(adapter);
1290 if (err) {
1291 dev_err(&pdev->dev,
1292 "Unable to allocate memory for vectors\n");
1293 goto request_done;
1294 }
1295 err = igb_alloc_queues(adapter);
1296 if (err) {
1297 dev_err(&pdev->dev,
1298 "Unable to allocate memory for queues\n");
1299 igb_free_q_vectors(adapter);
1300 goto request_done;
1301 }
1302 igb_setup_all_tx_resources(adapter);
1303 igb_setup_all_rx_resources(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001304 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001305
Alexander Duyckc74d5882011-08-26 07:46:45 +00001306 igb_assign_vector(adapter->q_vector[0], 0);
1307
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001308 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001309 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001310 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001311 if (!err)
1312 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001313
Auke Kok9d5c8242008-01-24 02:22:38 -08001314 /* fall back to legacy interrupts */
1315 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001316 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001317 }
1318
Alexander Duyckc74d5882011-08-26 07:46:45 +00001319 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001320 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001321
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001322 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001323 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001324 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001325
1326request_done:
1327 return err;
1328}
1329
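/**
 * igb_free_irq - free the IRQ(s) requested by igb_request_irq
 * @adapter: board private structure
 **/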
1330static void igb_free_irq(struct igb_adapter *adapter)
1331{
Auke Kok9d5c8242008-01-24 02:22:38 -08001332 if (adapter->msix_entries) {
1333 int vector = 0, i;
1334
Alexander Duyck047e0032009-10-27 15:49:27 +00001335 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001336
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001337 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001338 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001339 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001340 } else {
1341 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001342 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001343}
1344
1345/**
1346 * igb_irq_disable - Mask off interrupt generation on the NIC
1347 * @adapter: board private structure
1348 **/
1349static void igb_irq_disable(struct igb_adapter *adapter)
1350{
1351 struct e1000_hw *hw = &adapter->hw;
1352
Alexander Duyck25568a52009-10-27 23:49:59 +00001353 /*
1354 * we need to be careful when disabling interrupts. The VFs are also
1355 * mapped into these registers, so clearing the bits can cause
1356 * issues for the VF drivers; we therefore only clear what we set
1357 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001358 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001359 u32 regval = rd32(E1000_EIAM);
1360 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1361 wr32(E1000_EIMC, adapter->eims_enable_mask);
1362 regval = rd32(E1000_EIAC);
1363 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001364 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001365
1366 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001367 wr32(E1000_IMC, ~0);
1368 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001369 if (adapter->msix_entries) {
1370 int i;
1371 for (i = 0; i < adapter->num_q_vectors; i++)
1372 synchronize_irq(adapter->msix_entries[i].vector);
1373 } else {
1374 synchronize_irq(adapter->pdev->irq);
1375 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001376}
1377
1378/**
1379 * igb_irq_enable - Enable default interrupt generation settings
1380 * @adapter: board private structure
1381 **/
1382static void igb_irq_enable(struct igb_adapter *adapter)
1383{
1384 struct e1000_hw *hw = &adapter->hw;
1385
1386 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001387 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001388 u32 regval = rd32(E1000_EIAC);
1389 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1390 regval = rd32(E1000_EIAM);
1391 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001392 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001393 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001394 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001395 ims |= E1000_IMS_VMMB;
1396 }
1397 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001398 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001399 wr32(E1000_IMS, IMS_ENABLE_MASK |
1400 E1000_IMS_DRSTA);
1401 wr32(E1000_IAM, IMS_ENABLE_MASK |
1402 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001403 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001404}
1405
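/**
 * igb_update_mng_vlan - keep the management VLAN in the VLAN filter table
 * @adapter: board private structure
 *
 * Adds the VLAN id from the firmware's DHCP cookie to the filter table
 * and drops the previously programmed id, unless the stack still has
 * that VLAN active.
 **/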
1406static void igb_update_mng_vlan(struct igb_adapter *adapter)
1407{
Alexander Duyck51466232009-10-27 23:47:35 +00001408 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001409 u16 vid = adapter->hw.mng_cookie.vlan_id;
1410 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001411
Alexander Duyck51466232009-10-27 23:47:35 +00001412 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1413 /* add VID to filter table */
1414 igb_vfta_set(hw, vid, true);
1415 adapter->mng_vlan_id = vid;
1416 } else {
1417 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1418 }
1419
1420 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1421 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001422 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001423 /* remove VID from filter table */
1424 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001425 }
1426}
1427
1428/**
1429 * igb_release_hw_control - release control of the h/w to f/w
1430 * @adapter: address of board private structure
1431 *
1432 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1433 * For ASF and Pass Through versions of f/w this means that the
1434 * driver is no longer loaded.
1435 *
1436 **/
1437static void igb_release_hw_control(struct igb_adapter *adapter)
1438{
1439 struct e1000_hw *hw = &adapter->hw;
1440 u32 ctrl_ext;
1441
1442 /* Let firmware take over control of h/w */
1443 ctrl_ext = rd32(E1000_CTRL_EXT);
1444 wr32(E1000_CTRL_EXT,
1445 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1446}
1447
Auke Kok9d5c8242008-01-24 02:22:38 -08001448/**
1449 * igb_get_hw_control - get control of the h/w from f/w
1450 * @adapter: address of board private structure
1451 *
1452 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1453 * For ASF and Pass Through versions of f/w this means that
1454 * the driver is loaded.
1455 *
1456 **/
1457static void igb_get_hw_control(struct igb_adapter *adapter)
1458{
1459 struct e1000_hw *hw = &adapter->hw;
1460 u32 ctrl_ext;
1461
1462 /* Let firmware know the driver has taken over */
1463 ctrl_ext = rd32(E1000_CTRL_EXT);
1464 wr32(E1000_CTRL_EXT,
1465 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1466}
1467
Auke Kok9d5c8242008-01-24 02:22:38 -08001468/**
1469 * igb_configure - configure the hardware for RX and TX
1470 * @adapter: private board structure
1471 **/
1472static void igb_configure(struct igb_adapter *adapter)
1473{
1474 struct net_device *netdev = adapter->netdev;
1475 int i;
1476
1477 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001478 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001479
1480 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001481
Alexander Duyck85b430b2009-10-27 15:50:29 +00001482 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001483 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001484 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001485
1486 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001487 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001488
1489 igb_rx_fifo_flush_82575(&adapter->hw);
1490
Alexander Duyckc493ea42009-03-20 00:16:50 +00001491 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001492 * at least 1 descriptor unused to make sure
1493 * next_to_use != next_to_clean */
1494 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001495 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001496 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001497 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001498}
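
/*
 * Worked example of the invariant preserved above: on a 256-entry Rx
 * ring, igb_alloc_rx_buffers() is asked for at most 255 buffers, so
 * next_to_use can never catch up to next_to_clean and a completely
 * full ring stays distinguishable from a completely empty one.
 */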
1499
Nick Nunley88a268c2010-02-17 01:01:59 +00001500/**
1501 * igb_power_up_link - Power up the phy/serdes link
1502 * @adapter: address of board private structure
1503 **/
1504void igb_power_up_link(struct igb_adapter *adapter)
1505{
1506 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1507 igb_power_up_phy_copper(&adapter->hw);
1508 else
1509 igb_power_up_serdes_link_82575(&adapter->hw);
1510}
1511
1512/**
1513 * igb_power_down_link - Power down the phy/serdes link
1514 * @adapter: address of board private structure
1515 */
1516static void igb_power_down_link(struct igb_adapter *adapter)
1517{
1518 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1519 igb_power_down_phy_copper_82575(&adapter->hw);
1520 else
1521 igb_shutdown_serdes_link_82575(&adapter->hw);
1522}
Auke Kok9d5c8242008-01-24 02:22:38 -08001523
1524/**
1525 * igb_up - Open the interface and prepare it to handle traffic
1526 * @adapter: board private structure
1527 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001528int igb_up(struct igb_adapter *adapter)
1529{
1530 struct e1000_hw *hw = &adapter->hw;
1531 int i;
1532
1533 /* hardware has been reset, we need to reload some things */
1534 igb_configure(adapter);
1535
1536 clear_bit(__IGB_DOWN, &adapter->state);
1537
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001538 for (i = 0; i < adapter->num_q_vectors; i++)
1539 napi_enable(&(adapter->q_vector[i]->napi));
1540
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001541 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001542 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001543 else
1544 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001545
1546 /* Clear any pending interrupts. */
1547 rd32(E1000_ICR);
1548 igb_irq_enable(adapter);
1549
Alexander Duyckd4960302009-10-27 15:53:45 +00001550 /* notify VFs that reset has been completed */
1551 if (adapter->vfs_allocated_count) {
1552 u32 reg_data = rd32(E1000_CTRL_EXT);
1553 reg_data |= E1000_CTRL_EXT_PFRSTD;
1554 wr32(E1000_CTRL_EXT, reg_data);
1555 }
1556
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001557 netif_tx_start_all_queues(adapter->netdev);
1558
Alexander Duyck25568a52009-10-27 23:49:59 +00001559 /* start the watchdog. */
1560 hw->mac.get_link_status = 1;
1561 schedule_work(&adapter->watchdog_task);
1562
Auke Kok9d5c8242008-01-24 02:22:38 -08001563 return 0;
1564}
1565
1566void igb_down(struct igb_adapter *adapter)
1567{
Auke Kok9d5c8242008-01-24 02:22:38 -08001568 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001569 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001570 u32 tctl, rctl;
1571 int i;
1572
1573 /* signal that we're down so the interrupt handler does not
1574 * reschedule our watchdog timer */
1575 set_bit(__IGB_DOWN, &adapter->state);
1576
1577 /* disable receives in the hardware */
1578 rctl = rd32(E1000_RCTL);
1579 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1580 /* flush and sleep below */
1581
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001582 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001583
1584 /* disable transmits in the hardware */
1585 tctl = rd32(E1000_TCTL);
1586 tctl &= ~E1000_TCTL_EN;
1587 wr32(E1000_TCTL, tctl);
1588 /* flush both disables and wait for them to finish */
1589 wrfl();
1590 msleep(10);
1591
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001592 for (i = 0; i < adapter->num_q_vectors; i++)
1593 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001594
Auke Kok9d5c8242008-01-24 02:22:38 -08001595 igb_irq_disable(adapter);
1596
1597 del_timer_sync(&adapter->watchdog_timer);
1598 del_timer_sync(&adapter->phy_info_timer);
1599
Auke Kok9d5c8242008-01-24 02:22:38 -08001600 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001601
1602 /* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001603 spin_lock(&adapter->stats64_lock);
1604 igb_update_stats(adapter, &adapter->stats64);
1605 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001606
Auke Kok9d5c8242008-01-24 02:22:38 -08001607 adapter->link_speed = 0;
1608 adapter->link_duplex = 0;
1609
Jeff Kirsher30236822008-06-24 17:01:15 -07001610 if (!pci_channel_offline(adapter->pdev))
1611 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001612 igb_clean_all_tx_rings(adapter);
1613 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001614#ifdef CONFIG_IGB_DCA
1615
1616 /* since we reset the hardware DCA settings were cleared */
1617 igb_setup_dca(adapter);
1618#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001619}
1620
1621void igb_reinit_locked(struct igb_adapter *adapter)
1622{
1623 WARN_ON(in_interrupt());
1624 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1625 msleep(1);
1626 igb_down(adapter);
1627 igb_up(adapter);
1628 clear_bit(__IGB_RESETTING, &adapter->state);
1629}
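
/*
 * The test_and_set_bit() loop in igb_reinit_locked() makes
 * __IGB_RESETTING act as a sleeping lock: a second caller naps in 1ms
 * steps until the first clears the bit, so igb_down()/igb_up() never
 * run concurrently for the same adapter.
 */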
1630
1631void igb_reset(struct igb_adapter *adapter)
1632{
Alexander Duyck090b1792009-10-27 23:51:55 +00001633 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001634 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001635 struct e1000_mac_info *mac = &hw->mac;
1636 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001637 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1638 u16 hwm;
1639
1640 /* Repartition the PBA for MTUs greater than 9k.
1641 * CTRL.RST is required for this to take effect.
1642 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001643 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001644 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001645 case e1000_82580:
1646 pba = rd32(E1000_RXPBS);
1647 pba = igb_rxpbs_adjust_82580(pba);
1648 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001649 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001650 pba = rd32(E1000_RXPBS);
1651 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001652 break;
1653 case e1000_82575:
1654 default:
1655 pba = E1000_PBA_34K;
1656 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001657 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001658
Alexander Duyck2d064c02008-07-08 15:10:12 -07001659 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1660 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001661 /* adjust PBA for jumbo frames */
1662 wr32(E1000_PBA, pba);
1663
1664 /* To maintain wire speed transmits, the Tx FIFO should be
1665 * large enough to accommodate two full transmit packets,
1666 * rounded up to the next 1KB and expressed in KB. Likewise,
1667 * the Rx FIFO should be large enough to accommodate at least
1668 * one full receive packet and is similarly rounded up and
1669 * expressed in KB. */
1670 pba = rd32(E1000_PBA);
1671 /* upper 16 bits has Tx packet buffer allocation size in KB */
1672 tx_space = pba >> 16;
1673 /* lower 16 bits has Rx packet buffer allocation size in KB */
1674 pba &= 0xffff;
1675 /* the Tx FIFO also stores 16 bytes of information about the packet,
1676 * but doesn't include the Ethernet FCS because hardware appends it */
1677 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001678 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001679 ETH_FCS_LEN) * 2;
1680 min_tx_space = ALIGN(min_tx_space, 1024);
1681 min_tx_space >>= 10;
1682 /* software strips receive CRC, so leave room for it */
1683 min_rx_space = adapter->max_frame_size;
1684 min_rx_space = ALIGN(min_rx_space, 1024);
1685 min_rx_space >>= 10;
1686
1687 /* If current Tx allocation is less than the min Tx FIFO size,
1688 * and the min Tx FIFO size is less than the current Rx FIFO
1689 * allocation, take space away from current Rx allocation */
1690 if (tx_space < min_tx_space &&
1691 ((min_tx_space - tx_space) < pba)) {
1692 pba = pba - (min_tx_space - tx_space);
1693
1694 /* if short on rx space, rx wins and must trump tx
1695 * adjustment */
1696 if (pba < min_rx_space)
1697 pba = min_rx_space;
1698 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001699 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001700 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001701
1702 /* flow control settings */
1703 /* The high water mark must be low enough to fit one full frame
1704 * (or the size used for early receive) above it in the Rx FIFO.
1705 * Set it to the lower of:
1706 * - 90% of the Rx FIFO size, or
1707 * - the full Rx FIFO size minus one full frame */
1708 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001709 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001710
Alexander Duyckd405ea32009-12-23 13:21:27 +00001711 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1712 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001713 fc->pause_time = 0xFFFF;
1714 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001715 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001716
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001717 /* disable receive for all VFs and wait one second */
1718 if (adapter->vfs_allocated_count) {
1719 int i;
1720 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001721 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001722
1723 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001724 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001725
1726 /* disable transmits and receives */
1727 wr32(E1000_VFRE, 0);
1728 wr32(E1000_VFTE, 0);
1729 }
1730
Auke Kok9d5c8242008-01-24 02:22:38 -08001731 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001732 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001733 wr32(E1000_WUC, 0);
1734
Alexander Duyck330a6d62009-10-27 23:51:35 +00001735 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001736 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001737
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001738 igb_init_dmac(adapter, pba);
Nick Nunley88a268c2010-02-17 01:01:59 +00001739 if (!netif_running(adapter->netdev))
1740 igb_power_down_link(adapter);
1741
Auke Kok9d5c8242008-01-24 02:22:38 -08001742 igb_update_mng_vlan(adapter);
1743
1744 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1745 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1746
Alexander Duyck330a6d62009-10-27 23:51:35 +00001747 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001748}
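
/*
 * Worked example of the sizing above (illustrative only; assumes a
 * 9000-byte MTU, so max_frame_size = 9022, and an 82576 whose PBA
 * reads back as a 64KB Rx / 40KB Tx split):
 * min_tx_space = (9022 + 16 - 4) * 2 = 18068, rounded up to 18KB;
 * min_rx_space = 9022, rounded up to 9KB. Tx already has 40KB >= 18KB,
 * so nothing is taken from Rx, and the high water mark becomes
 * min(65536 * 9 / 10, 65536 - 2 * 9022) = 47492, masked to 16-byte
 * granularity as 47488.
 */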
1749
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001750static netdev_features_t igb_fix_features(struct net_device *netdev,
1751 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001752{
1753 /*
1754 * Since there is no support for separate rx/tx vlan accel
1755 * enable/disable, make sure the tx flag is always in the same state as rx.
1756 */
1757 if (features & NETIF_F_HW_VLAN_RX)
1758 features |= NETIF_F_HW_VLAN_TX;
1759 else
1760 features &= ~NETIF_F_HW_VLAN_TX;
1761
1762 return features;
1763}
1764
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001765static int igb_set_features(struct net_device *netdev,
1766 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001767{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001768 netdev_features_t changed = netdev->features ^ features;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001769
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001770 if (changed & NETIF_F_HW_VLAN_RX)
1771 igb_vlan_mode(netdev, features);
1772
Michał Mirosławac52caa2011-06-08 08:38:01 +00001773 return 0;
1774}
1775
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001776static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001777 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001778 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001779 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001780 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001781 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001782 .ndo_set_mac_address = igb_set_mac,
1783 .ndo_change_mtu = igb_change_mtu,
1784 .ndo_do_ioctl = igb_ioctl,
1785 .ndo_tx_timeout = igb_tx_timeout,
1786 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001787 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1788 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001789 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1790 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1791 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1792 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001793#ifdef CONFIG_NET_POLL_CONTROLLER
1794 .ndo_poll_controller = igb_netpoll,
1795#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001796 .ndo_fix_features = igb_fix_features,
1797 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001798};
1799
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001800/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001801 * igb_probe - Device Initialization Routine
1802 * @pdev: PCI device information struct
1803 * @ent: entry in igb_pci_tbl
1804 *
1805 * Returns 0 on success, negative on failure
1806 *
1807 * igb_probe initializes an adapter identified by a pci_dev structure.
1808 * The OS initialization, configuring of the adapter private structure,
1809 * and a hardware reset occur.
1810 **/
1811static int __devinit igb_probe(struct pci_dev *pdev,
1812 const struct pci_device_id *ent)
1813{
1814 struct net_device *netdev;
1815 struct igb_adapter *adapter;
1816 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001817 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001818 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001819 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001820 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1821 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001822 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001823 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001824 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001825
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001826 /* Catch broken hardware that put the wrong VF device ID in
1827 * the PCIe SR-IOV capability.
1828 */
1829 if (pdev->is_virtfn) {
1830 WARN(1, "%s (%hx:%hx) should not be a VF!\n",
1831 pci_name(pdev), pdev->vendor, pdev->device);
1832 return -EINVAL;
1833 }
1834
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001835 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001836 if (err)
1837 return err;
1838
1839 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001840 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001841 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001842 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001843 if (!err)
1844 pci_using_dac = 1;
1845 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001846 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001847 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001848 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001849 if (err) {
1850 dev_err(&pdev->dev, "No usable DMA "
1851 "configuration, aborting\n");
1852 goto err_dma;
1853 }
1854 }
1855 }
1856
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001857 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1858 IORESOURCE_MEM),
1859 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001860 if (err)
1861 goto err_pci_reg;
1862
Frans Pop19d5afd2009-10-02 10:04:12 -07001863 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001864
Auke Kok9d5c8242008-01-24 02:22:38 -08001865 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001866 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001867
1868 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001869 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001870 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001871 if (!netdev)
1872 goto err_alloc_etherdev;
1873
1874 SET_NETDEV_DEV(netdev, &pdev->dev);
1875
1876 pci_set_drvdata(pdev, netdev);
1877 adapter = netdev_priv(netdev);
1878 adapter->netdev = netdev;
1879 adapter->pdev = pdev;
1880 hw = &adapter->hw;
1881 hw->back = adapter;
1882 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1883
1884 mmio_start = pci_resource_start(pdev, 0);
1885 mmio_len = pci_resource_len(pdev, 0);
1886
1887 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001888 hw->hw_addr = ioremap(mmio_start, mmio_len);
1889 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001890 goto err_ioremap;
1891
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001892 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001893 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001894 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001895
1896 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1897
1898 netdev->mem_start = mmio_start;
1899 netdev->mem_end = mmio_start + mmio_len;
1900
Auke Kok9d5c8242008-01-24 02:22:38 -08001901 /* PCI config space info */
1902 hw->vendor_id = pdev->vendor;
1903 hw->device_id = pdev->device;
1904 hw->revision_id = pdev->revision;
1905 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1906 hw->subsystem_device_id = pdev->subsystem_device;
1907
Auke Kok9d5c8242008-01-24 02:22:38 -08001908 /* Copy the default MAC, PHY and NVM function pointers */
1909 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1910 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1911 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1912 /* Initialize skew-specific constants */
1913 err = ei->get_invariants(hw);
1914 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001915 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001916
Alexander Duyck450c87c2009-02-06 23:22:11 +00001917 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001918 err = igb_sw_init(adapter);
1919 if (err)
1920 goto err_sw_init;
1921
1922 igb_get_bus_info_pcie(hw);
1923
1924 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001925
1926 /* Copper options */
1927 if (hw->phy.media_type == e1000_media_type_copper) {
1928 hw->phy.mdix = AUTO_ALL_MODES;
1929 hw->phy.disable_polarity_correction = false;
1930 hw->phy.ms_type = e1000_ms_hw_default;
1931 }
1932
1933 if (igb_check_reset_block(hw))
1934 dev_info(&pdev->dev,
1935 "PHY reset is blocked due to SOL/IDER session.\n");
1936
Alexander Duyck077887c2011-08-26 07:46:29 +00001937 /*
1938 * features is initialized to 0 at allocation; it might have bits
1939 * set by igb_sw_init, so we should use an OR instead of an
1940 * assignment.
1941 */
1942 netdev->features |= NETIF_F_SG |
1943 NETIF_F_IP_CSUM |
1944 NETIF_F_IPV6_CSUM |
1945 NETIF_F_TSO |
1946 NETIF_F_TSO6 |
1947 NETIF_F_RXHASH |
1948 NETIF_F_RXCSUM |
1949 NETIF_F_HW_VLAN_RX |
1950 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001951
Alexander Duyck077887c2011-08-26 07:46:29 +00001952 /* copy netdev features into list of user selectable features */
1953 netdev->hw_features |= netdev->features;
Auke Kok9d5c8242008-01-24 02:22:38 -08001954
Alexander Duyck077887c2011-08-26 07:46:29 +00001955 /* set this bit last since it cannot be part of hw_features */
1956 netdev->features |= NETIF_F_HW_VLAN_FILTER;
1957
1958 netdev->vlan_features |= NETIF_F_TSO |
1959 NETIF_F_TSO6 |
1960 NETIF_F_IP_CSUM |
1961 NETIF_F_IPV6_CSUM |
1962 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001963
Yi Zou7b872a52010-09-22 17:57:58 +00001964 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001965 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001966 netdev->vlan_features |= NETIF_F_HIGHDMA;
1967 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001968
Michał Mirosławac52caa2011-06-08 08:38:01 +00001969 if (hw->mac.type >= e1000_82576) {
1970 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001971 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001972 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001973
Jiri Pirko01789342011-08-16 06:29:00 +00001974 netdev->priv_flags |= IFF_UNICAST_FLT;
1975
Alexander Duyck330a6d62009-10-27 23:51:35 +00001976 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001977
1978 /* before reading the NVM, reset the controller to put the device in a
1979 * known good starting state */
1980 hw->mac.ops.reset_hw(hw);
1981
1982 /* make sure the NVM is good */
Carolyn Wyborny4322e562011-03-11 20:43:18 -08001983 if (hw->nvm.ops.validate(hw) < 0) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001984 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1985 err = -EIO;
1986 goto err_eeprom;
1987 }
1988
1989 /* copy the MAC address out of the NVM */
1990 if (hw->mac.ops.read_mac_addr(hw))
1991 dev_err(&pdev->dev, "NVM Read Error\n");
1992
1993 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1994 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1995
1996 if (!is_valid_ether_addr(netdev->perm_addr)) {
1997 dev_err(&pdev->dev, "Invalid MAC Address\n");
1998 err = -EIO;
1999 goto err_eeprom;
2000 }
2001
Joe Perchesc061b182010-08-23 18:20:03 +00002002 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002003 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002004 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002005 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002006
2007 INIT_WORK(&adapter->reset_task, igb_reset_task);
2008 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2009
Alexander Duyck450c87c2009-02-06 23:22:11 +00002010 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002011 adapter->fc_autoneg = true;
2012 hw->mac.autoneg = true;
2013 hw->phy.autoneg_advertised = 0x2f;
2014
Alexander Duyck0cce1192009-07-23 18:10:24 +00002015 hw->fc.requested_mode = e1000_fc_default;
2016 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002017
Auke Kok9d5c8242008-01-24 02:22:38 -08002018 igb_validate_mdi_setting(hw);
2019
Auke Kok9d5c8242008-01-24 02:22:38 -08002020 /* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
2021 * enable the ACPI Magic Packet filter
2022 */
2023
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002024 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002025 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002026 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002027 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2028 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2029 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002030 else if (hw->bus.func == 1)
2031 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002032
2033 if (eeprom_data & eeprom_apme_mask)
2034 adapter->eeprom_wol |= E1000_WUFC_MAG;
2035
2036 /* now that we have the eeprom settings, apply the special cases where
2037 * the eeprom may be wrong or the board simply won't support wake on
2038 * lan on a particular port */
2039 switch (pdev->device) {
2040 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2041 adapter->eeprom_wol = 0;
2042 break;
2043 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002044 case E1000_DEV_ID_82576_FIBER:
2045 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002046 /* Wake events only supported on port A for dual fiber
2047 * regardless of eeprom setting */
2048 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2049 adapter->eeprom_wol = 0;
2050 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002051 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002052 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002053 /* if quad port adapter, disable WoL on all but port A */
2054 if (global_quad_port_a != 0)
2055 adapter->eeprom_wol = 0;
2056 else
2057 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2058 /* Reset for multiple quad port adapters */
2059 if (++global_quad_port_a == 4)
2060 global_quad_port_a = 0;
2061 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002062 }
2063
2064 /* initialize the wol settings based on the eeprom settings */
2065 adapter->wol = adapter->eeprom_wol;
"Rafael J. Wysocki"e1b86d82008-11-07 20:30:37 +00002066 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002067
2068 /* reset the hardware with the new settings */
2069 igb_reset(adapter);
2070
2071 /* let the f/w know that the h/w is now under the control of the
2072 * driver. */
2073 igb_get_hw_control(adapter);
2074
Auke Kok9d5c8242008-01-24 02:22:38 -08002075 strcpy(netdev->name, "eth%d");
2076 err = register_netdev(netdev);
2077 if (err)
2078 goto err_register;
2079
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002080 /* carrier off reporting is important to ethtool even BEFORE open */
2081 netif_carrier_off(netdev);
2082
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002083#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002084 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002085 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002086 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002087 igb_setup_dca(adapter);
2088 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002089
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002090#endif
Anders Berggren673b8b72011-02-04 07:32:32 +00002091 /* do hw tstamp init after resetting */
2092 igb_init_hw_timer(adapter);
2093
Auke Kok9d5c8242008-01-24 02:22:38 -08002094 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2095 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002096 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002097 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002098 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002099 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002100 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002101 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2102 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2103 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2104 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002105 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002106
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002107 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2108 if (ret_val)
2109 strcpy(part_str, "Unknown");
2110 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002111 dev_info(&pdev->dev,
2112 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2113 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002114 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002115 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002116 switch (hw->mac.type) {
2117 case e1000_i350:
2118 igb_set_eee_i350(hw);
2119 break;
2120 default:
2121 break;
2122 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002123
2124 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002125 return 0;
2126
2127err_register:
2128 igb_release_hw_control(adapter);
2129err_eeprom:
2130 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002131 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002132
2133 if (hw->flash_address)
2134 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002135err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002136 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002137 iounmap(hw->hw_addr);
2138err_ioremap:
2139 free_netdev(netdev);
2140err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002141 pci_release_selected_regions(pdev,
2142 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002143err_pci_reg:
2144err_dma:
2145 pci_disable_device(pdev);
2146 return err;
2147}
2148
2149/**
2150 * igb_remove - Device Removal Routine
2151 * @pdev: PCI device information struct
2152 *
2153 * igb_remove is called by the PCI subsystem to alert the driver
2154 * that it should release a PCI device. This could be caused by a
2155 * Hot-Plug event, or because the driver is going to be removed from
2156 * memory.
2157 **/
2158static void __devexit igb_remove(struct pci_dev *pdev)
2159{
2160 struct net_device *netdev = pci_get_drvdata(pdev);
2161 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002162 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002163
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002164 pm_runtime_get_noresume(&pdev->dev);
2165
Tejun Heo760141a2010-12-12 16:45:14 +01002166 /*
2167 * The watchdog timer may be rescheduled, so explicitly
2168 * disable watchdog from being rescheduled.
2169 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002170 set_bit(__IGB_DOWN, &adapter->state);
2171 del_timer_sync(&adapter->watchdog_timer);
2172 del_timer_sync(&adapter->phy_info_timer);
2173
Tejun Heo760141a2010-12-12 16:45:14 +01002174 cancel_work_sync(&adapter->reset_task);
2175 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002176
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002177#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002178 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002179 dev_info(&pdev->dev, "DCA disabled\n");
2180 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002181 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002182 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002183 }
2184#endif
2185
Auke Kok9d5c8242008-01-24 02:22:38 -08002186 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2187 * would have already happened in close and is redundant. */
2188 igb_release_hw_control(adapter);
2189
2190 unregister_netdev(netdev);
2191
Alexander Duyck047e0032009-10-27 15:49:27 +00002192 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002193
Alexander Duyck37680112009-02-19 20:40:30 -08002194#ifdef CONFIG_PCI_IOV
2195 /* reclaim resources allocated to VFs */
2196 if (adapter->vf_data) {
2197 /* disable iov and allow time for transactions to clear */
Greg Rose0224d662011-10-14 02:57:14 +00002198 if (!igb_check_vf_assignment(adapter)) {
2199 pci_disable_sriov(pdev);
2200 msleep(500);
2201 } else {
2202 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2203 }
Alexander Duyck37680112009-02-19 20:40:30 -08002204
2205 kfree(adapter->vf_data);
2206 adapter->vf_data = NULL;
2207 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002208 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002209 msleep(100);
2210 dev_info(&pdev->dev, "IOV Disabled\n");
2211 }
2212#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002213
Alexander Duyck28b07592009-02-06 23:20:31 +00002214 iounmap(hw->hw_addr);
2215 if (hw->flash_address)
2216 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002217 pci_release_selected_regions(pdev,
2218 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002219
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002220 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002221 free_netdev(netdev);
2222
Frans Pop19d5afd2009-10-02 10:04:12 -07002223 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002224
Auke Kok9d5c8242008-01-24 02:22:38 -08002225 pci_disable_device(pdev);
2226}
2227
2228/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002229 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2230 * @adapter: board private structure to initialize
2231 *
2232 * This function initializes the VF specific data storage and then attempts to
2233 * allocate the VFs. The reason for ordering it this way is that it is much
2234 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2235 * the memory for the VFs.
2236 **/
2237static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2238{
2239#ifdef CONFIG_PCI_IOV
2240 struct pci_dev *pdev = adapter->pdev;
Greg Rose0224d662011-10-14 02:57:14 +00002241 int old_vfs = igb_find_enabled_vfs(adapter);
2242 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002243
Greg Rose0224d662011-10-14 02:57:14 +00002244 if (old_vfs) {
2245 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2246 "max_vfs setting of %d\n", old_vfs, max_vfs);
2247 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002248 }
2249
Greg Rose0224d662011-10-14 02:57:14 +00002250 if (!adapter->vfs_allocated_count)
2251 return;
2252
2253 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2254 sizeof(struct vf_data_storage), GFP_KERNEL);
2255 /* if allocation failed then we do not support SR-IOV */
2256 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002257 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002258 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2259 "Data Storage\n");
2260 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002261 }
Greg Rose0224d662011-10-14 02:57:14 +00002262
2263 if (!old_vfs) {
2264 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2265 goto err_out;
2266 }
2267 dev_info(&pdev->dev, "%d VFs allocated\n",
2268 adapter->vfs_allocated_count);
2269 for (i = 0; i < adapter->vfs_allocated_count; i++)
2270 igb_vf_configure(adapter, i);
2271
2272 /* DMA Coalescing is not supported in IOV mode. */
2273 adapter->flags &= ~IGB_FLAG_DMAC;
2274 goto out;
2275err_out:
2276 kfree(adapter->vf_data);
2277 adapter->vf_data = NULL;
2278 adapter->vfs_allocated_count = 0;
2279out:
2280 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002281#endif /* CONFIG_PCI_IOV */
2282}
2283
Alexander Duyck115f4592009-11-12 18:37:00 +00002284/**
2285 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2286 * @adapter: board private structure to initialize
2287 *
2288 * igb_init_hw_timer initializes the function pointer and values for the hw
2289 * timer found in hardware.
2290 **/
2291static void igb_init_hw_timer(struct igb_adapter *adapter)
2292{
2293 struct e1000_hw *hw = &adapter->hw;
2294
2295 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002296 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002297 case e1000_82580:
2298 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2299 adapter->cycles.read = igb_read_clock;
2300 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2301 adapter->cycles.mult = 1;
2302 /*
2303 * The 82580 timesync hardware advances the system timer by 8ns
2304 * every 8ns, and the value cannot be shifted. Instead we need to shift
2305 * the registers to generate a 64bit timer value. As a result
2306 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2307 * 24 in order to generate a larger value for synchronization.
2308 */
2309 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2310 /* disable system timer temporarily by setting bit 31 */
2311 wr32(E1000_TSAUXC, 0x80000000);
2312 wrfl();
2313
2314 /* Set registers so that rollover occurs soon to test this. */
2315 wr32(E1000_SYSTIMR, 0x00000000);
2316 wr32(E1000_SYSTIML, 0x80000000);
2317 wr32(E1000_SYSTIMH, 0x000000FF);
2318 wrfl();
2319
2320 /* enable system timer by clearing bit 31 */
2321 wr32(E1000_TSAUXC, 0x0);
2322 wrfl();
2323
2324 timecounter_init(&adapter->clock,
2325 &adapter->cycles,
2326 ktime_to_ns(ktime_get_real()));
2327 /*
2328 * Synchronize our NIC clock against system wall clock. NIC
2329 * time stamp reading requires ~3us per sample; each sample
2330 * was pretty stable even under load, so we only require 10
2331 * samples for each offset comparison.
2332 */
2333 memset(&adapter->compare, 0, sizeof(adapter->compare));
2334 adapter->compare.source = &adapter->clock;
2335 adapter->compare.target = ktime_get_real;
2336 adapter->compare.num_samples = 10;
2337 timecompare_update(&adapter->compare, 0);
2338 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00002339 case e1000_82576:
2340 /*
2341 * Initialize hardware timer: we keep it running just in case
2342 * that some program needs it later on.
2343 */
2344 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2345 adapter->cycles.read = igb_read_clock;
2346 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2347 adapter->cycles.mult = 1;
2348 /*
2349 * Scale the NIC clock cycle by a large factor so that
2350 * relatively small clock corrections can be added or
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002351 * subtracted at each clock tick. The drawbacks of a large
Alexander Duyck115f4592009-11-12 18:37:00 +00002352 * factor are a) that the clock register overflows more quickly
2353 * (not such a big deal) and b) that the increment per tick has
2354 * to fit into 24 bits. As a result we need to use a shift of
2355 * 19 so we can fit a value of 16 into the TIMINCA register.
2356 */
2357 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2358 wr32(E1000_TIMINCA,
2359 (1 << E1000_TIMINCA_16NS_SHIFT) |
2360 (16 << IGB_82576_TSYNC_SHIFT));
2361
2362 /* Set registers so that rollover occurs soon to test this. */
2363 wr32(E1000_SYSTIML, 0x00000000);
2364 wr32(E1000_SYSTIMH, 0xFF800000);
2365 wrfl();
2366
2367 timecounter_init(&adapter->clock,
2368 &adapter->cycles,
2369 ktime_to_ns(ktime_get_real()));
2370 /*
2371 * Synchronize our NIC clock against system wall clock. NIC
2372 * time stamp reading requires ~3us per sample; each sample
2373 * was pretty stable even under load, so we only require 10
2374 * samples for each offset comparison.
2375 */
2376 memset(&adapter->compare, 0, sizeof(adapter->compare));
2377 adapter->compare.source = &adapter->clock;
2378 adapter->compare.target = ktime_get_real;
2379 adapter->compare.num_samples = 10;
2380 timecompare_update(&adapter->compare, 0);
2381 break;
2382 case e1000_82575:
2383 /* 82575 does not support timesync */
2384 default:
2385 break;
2386 }
2387
2388}
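
/*
 * Sketch of the 82576 conversion set up above: with mult = 1 and
 * shift = IGB_82576_TSYNC_SHIFT (19), the timecounter computes
 * ns = cycles >> 19, while TIMINCA makes SYSTIM advance by
 * 16 << 19 = 2^23 counter units every 16ns clock period, i.e. 2^19
 * units per nanosecond, so the shift converts the counter back to
 * wall-clock nanoseconds (and 2^23 still fits the 24-bit increment
 * field the existing comment refers to).
 */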
2389
Alexander Duycka6b623e2009-10-27 23:47:53 +00002390/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002391 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2392 * @adapter: board private structure to initialize
2393 *
2394 * igb_sw_init initializes the Adapter private data structure.
2395 * Fields are initialized based on PCI device information and
2396 * OS network device settings (MTU size).
2397 **/
2398static int __devinit igb_sw_init(struct igb_adapter *adapter)
2399{
2400 struct e1000_hw *hw = &adapter->hw;
2401 struct net_device *netdev = adapter->netdev;
2402 struct pci_dev *pdev = adapter->pdev;
2403
2404 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2405
Alexander Duyck13fde972011-10-05 13:35:24 +00002406 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002407 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2408 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002409
2410 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002411 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2412 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2413
Alexander Duyck13fde972011-10-05 13:35:24 +00002414 /* set default work limits */
2415 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2416
Alexander Duyck153285f2011-08-26 07:43:32 +00002417 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2418 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002419 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2420
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002421 adapter->node = -1;
2422
Eric Dumazet12dcd862010-10-15 17:27:10 +00002423 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002424#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002425 switch (hw->mac.type) {
2426 case e1000_82576:
2427 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002428 if (max_vfs > 7) {
2429 dev_warn(&pdev->dev,
2430 "Maximum of 7 VFs per PF, using max\n");
2431 adapter->vfs_allocated_count = 7;
2432 } else
2433 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002434 break;
2435 default:
2436 break;
2437 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002438#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00002439 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
Williams, Mitch A665c8c82011-06-07 14:22:57 -07002440 /* i350 cannot do RSS and SR-IOV at the same time */
2441 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2442 adapter->rss_queues = 1;
Alexander Duycka99955f2009-11-12 18:37:19 +00002443
2444 /*
2445 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
2446 * then we should combine the queues into a queue pair in order to
2447 * conserve interrupts due to limited supply
2448 */
2449 if ((adapter->rss_queues > 4) ||
2450 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2451 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
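	/* e.g. 8 RSS queues: pairing puts Tx queue i and Rx queue i on one
	 * q_vector, so roughly 8 vectors are needed instead of 16 */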
2452
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002453 /* Setup and initialize a copy of the hw vlan table array */
2454 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2455 E1000_VLAN_FILTER_TBL_SIZE,
2456 GFP_ATOMIC);
2457
Alexander Duycka6b623e2009-10-27 23:47:53 +00002458 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002459 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002460 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2461 return -ENOMEM;
2462 }
2463
Alexander Duycka6b623e2009-10-27 23:47:53 +00002464 igb_probe_vfs(adapter);
2465
Auke Kok9d5c8242008-01-24 02:22:38 -08002466 /* Explicitly disable IRQ since the NIC can be in any state. */
2467 igb_irq_disable(adapter);
2468
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002469 if (hw->mac.type == e1000_i350)
2470 adapter->flags &= ~IGB_FLAG_DMAC;
2471
Auke Kok9d5c8242008-01-24 02:22:38 -08002472 set_bit(__IGB_DOWN, &adapter->state);
2473 return 0;
2474}
2475
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int __igb_open(struct net_device *netdev, bool resuming)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to set up our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}

static int igb_open(struct net_device *netdev)
{
	return __igb_open(netdev, false);
}

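/*
 * Note on the resuming/suspending flags: the power-management callbacks are
 * expected to reuse this open/close logic (e.g. a resume handler calling
 * __igb_open(netdev, true)) so that the runtime-PM get/put is skipped while
 * the PM core already holds the device active. This description of the
 * callers is an inference from the flag, not from code shown here.
 */
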
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int __igb_close(struct net_device *netdev, bool suspending)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igb_down(adapter);
	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);
	return 0;
}

static int igb_close(struct net_device *netdev)
{
	return __igb_close(netdev, false);
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size;

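	/*
	 * Allocation strategy below: buffer and descriptor memory is first
	 * requested on the ring's own NUMA node (vzalloc_node() plus a
	 * temporary set_dev_node() around dma_alloc_coherent()), falling
	 * back to any node on failure, so descriptor accesses stay local.
	 */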
	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, tx_ring->numa_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev,
						   tx_ring->size,
						   &tx_ring->dma,
						   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;
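	/*
	 * TXDCTL packs three descriptor-fetch thresholds: PTHRESH in bits
	 * 5:0 (prefetch), HTHRESH in bits 13:8 (host) and WTHRESH in bits
	 * 21:16 (write-back), which is what the shifts above encode.
	 */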

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size, desc_len;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, rx_ring->numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev,
						   rx_ring->size,
						   &rx_ring->dma,
						   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8 bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

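	/*
	 * The 40-byte RSS key is packed little-endian, four bytes per
	 * RSSRK register: e.g. bytes 0x6d 0x5a 0x56 0xda above become
	 * RSSRK[0] = 0xda565a6d.
	 */
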
	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

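	/*
	 * Worked example of the redirection-table fill above: with four
	 * RSS queues and shift == 0 the 128 byte-wide entries simply cycle
	 * 0, 1, 2, 3, ...; on an 82576 with VFs (shift == 3, two queues)
	 * each byte alternates between 0 and 1 << 3 = 8, i.e. the queue
	 * index is shifted into the bit positions the VMDq hardware reads.
	 */
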
	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!! For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

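/*
 * VMOLR.RLPML is the per-pool maximum receive packet length; the mask
 * clears the previous length before the new size is OR-ed in, and
 * E1000_VMOLR_LPE enables long-packet (jumbo) reception for that pool.
 */
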
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer, so if the MAC is
	 * older we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

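	/*
	 * SRRCTL above selects always-on header split: the header buffer
	 * size field is expressed in 64-byte units (hence the left shift)
	 * and the packet buffer size in 1 KB units (hence the right shift),
	 * with payloads landing in half-page buffers.
	 */
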
	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Set up the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	} else if (tx_buffer->dma) {
		dma_unmap_page(ring->dev,
			       tx_buffer->dma,
			       tx_buffer->length,
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	tx_buffer->dma = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

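	/*
	 * With header split each Rx buffer may hold two DMA mappings: a
	 * single mapping of IGB_RX_HDR_LEN bytes for the header and a
	 * half-page mapping for the payload, so both (plus the page
	 * reference itself) are released below.
	 */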
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

3476/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003477 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003478 * @netdev: network interface device structure
3479 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003480 * The set_rx_mode entry point is called whenever the unicast or multicast
3481 * address lists or the network interface flags are updated. This routine is
3482 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003483 * promiscuous mode, and all-multi behavior.
3484 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003485static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003486{
3487 struct igb_adapter *adapter = netdev_priv(netdev);
3488 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003489 unsigned int vfn = adapter->vfs_allocated_count;
3490 u32 rctl, vmolr = 0;
3491 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003492
3493 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003494 rctl = rd32(E1000_RCTL);
3495
Alexander Duyck68d480c2009-10-05 06:33:08 +00003496 /* clear the effected bits */
3497 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3498
Patrick McHardy746b9f02008-07-16 20:15:45 -07003499 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003500 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003501 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003502 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003503 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003504 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003505 vmolr |= E1000_VMOLR_MPME;
3506 } else {
3507 /*
3508 * Write addresses to the MTA, if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003509 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003510 * that we can at least receive multicast traffic
3511 */
3512 count = igb_write_mc_addr_list(netdev);
3513 if (count < 0) {
3514 rctl |= E1000_RCTL_MPE;
3515 vmolr |= E1000_VMOLR_MPME;
3516 } else if (count) {
3517 vmolr |= E1000_VMOLR_ROMPE;
3518 }
3519 }
3520 /*
3521 * Write addresses to available RAR registers, if there is not
3522 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003523 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003524 */
3525 count = igb_write_uc_addr_list(netdev);
3526 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003527 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003528 vmolr |= E1000_VMOLR_ROPE;
3529 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003530 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003531 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003532 wr32(E1000_RCTL, rctl);
3533
Alexander Duyck68d480c2009-10-05 06:33:08 +00003534 /*
3535 * In order to support SR-IOV and eventually VMDq it is necessary to set
3536 * the VMOLR to enable the appropriate modes. Without this workaround
3537 * we will have issues with VLAN tag stripping not being done for frames
3538 * that are only arriving because we are the default pool
3539 */
3540 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003541 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003542
Alexander Duyck68d480c2009-10-05 06:33:08 +00003543 vmolr |= rd32(E1000_VMOLR(vfn)) &
3544 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3545 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003546 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003547}
3548
static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt. get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
			       "Duplex, Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full" : "Half",
			       (ctrl & E1000_CTRL_TFCE) &&
			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
			       (ctrl & E1000_CTRL_RFCE) ? "RX" :
			       (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_LINK_THROTTLE)) {
				netdev_info(netdev, "The network adapter link "
					    "speed was downshifted because it "
					    "overheated\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was "
					   "stopped because it overheated\n");
			}

			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

3795enum latency_range {
3796 lowest_latency = 0,
3797 low_latency = 1,
3798 bulk_latency = 2,
3799 latency_invalid = 255
3800};
3801
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003802/**
3803 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3804 *
3805 * Stores a new ITR value based on strictly on packet size. This
3806 * algorithm is less sophisticated than that used in igb_update_itr,
3807 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003808 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003809 * were determined based on theoretical maximum wire speed and testing
3810 * data, in order to minimize response time while increasing bulk
3811 * throughput.
3812 * This functionality is controlled by the InterruptThrottleRate module
3813 * parameter (see igb_param.c)
3814 * NOTE: This function is called only when operating in a multiqueue
3815 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003816 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003817 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003818static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003819{
Alexander Duyck047e0032009-10-27 15:49:27 +00003820 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003821 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003822 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003823 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003824
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003825 /* For non-gigabit speeds, just fix the interrupt rate at 4000
 3826 * ints/sec - an ITR register value of IGB_4K_ITR.
3827 */
3828 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003829 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003830 goto set_itr_val;
3831 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003832
Alexander Duyck0ba82992011-08-26 07:45:47 +00003833 packets = q_vector->rx.total_packets;
3834 if (packets)
3835 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003836
Alexander Duyck0ba82992011-08-26 07:45:47 +00003837 packets = q_vector->tx.total_packets;
3838 if (packets)
3839 avg_wire_size = max_t(u32, avg_wire_size,
3840 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003841
3842 /* if avg_wire_size isn't set no work was done */
3843 if (!avg_wire_size)
3844 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003845
3846 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3847 avg_wire_size += 24;
3848
3849 /* Don't starve jumbo frames */
3850 avg_wire_size = min(avg_wire_size, 3000);
3851
3852 /* Give a little boost to mid-size frames */
3853 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3854 new_val = avg_wire_size / 3;
3855 else
3856 new_val = avg_wire_size / 2;
3857
Alexander Duyck0ba82992011-08-26 07:45:47 +00003858 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3859 if (new_val < IGB_20K_ITR &&
3860 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3861 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3862 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003863
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003864set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003865 if (new_val != q_vector->itr_val) {
3866 q_vector->itr_val = new_val;
3867 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003868 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003869clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003870 q_vector->rx.total_bytes = 0;
3871 q_vector->rx.total_packets = 0;
3872 q_vector->tx.total_bytes = 0;
3873 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003874}
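
/* Illustrative sketch, not part of the driver: the avg_wire_size to ITR
 * mapping used above, pulled into a pure helper so the heuristic can be
 * reasoned about (and unit-tested) in isolation. The helper name is
 * hypothetical, and the conservative-mode clamp to IGB_20K_ITR applied
 * above is deliberately omitted here.
 */
static inline int igb_itr_from_wire_size(int avg_wire_size)
{
	/* account for CRC, preamble, and inter-frame gap */
	avg_wire_size += 24;

	/* cap the value so jumbo frames are not starved */
	avg_wire_size = min(avg_wire_size, 3000);

	/* mid-size frames (300..1200 bytes) get a higher interrupt rate */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		return avg_wire_size / 3;

	return avg_wire_size / 2;
}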
3875
3876/**
3877 * igb_update_itr - update the dynamic ITR value based on statistics
3878 * Stores a new ITR value based on packets and byte
3879 * counts during the last interrupt. The advantage of per interrupt
3880 * computation is faster updates and more accurate ITR for the current
3881 * traffic pattern. Constants in this function were computed
3882 * based on theoretical maximum wire speed and thresholds were set based
3883 * on testing data as well as attempting to minimize response time
3884 * while increasing bulk throughput.
3885 * this functionality is controlled by the InterruptThrottleRate module
3886 * parameter (see igb_param.c)
3887 * NOTE: These calculations are only valid when operating in a single-
3888 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003889 * @q_vector: pointer to q_vector
3890 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003891 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003892static void igb_update_itr(struct igb_q_vector *q_vector,
3893 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003894{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003895 unsigned int packets = ring_container->total_packets;
3896 unsigned int bytes = ring_container->total_bytes;
3897 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003898
Alexander Duyck0ba82992011-08-26 07:45:47 +00003899 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003900 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003901 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003902
Alexander Duyck0ba82992011-08-26 07:45:47 +00003903 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003904 case lowest_latency:
3905 /* handle TSO and jumbo frames */
3906 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003907 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003908 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003909 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003910 break;
3911 case low_latency: /* 50 usec aka 20000 ints/s */
3912 if (bytes > 10000) {
3913 /* this if handles the TSO accounting */
3914 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003915 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003916 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003917 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003918 			} else if (packets > 35) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003919 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003920 }
3921 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003922 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003923 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003924 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003925 }
3926 break;
3927 case bulk_latency: /* 250 usec aka 4000 ints/s */
3928 if (bytes > 25000) {
3929 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003930 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003931 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003932 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003933 }
3934 break;
3935 }
3936
Alexander Duyck0ba82992011-08-26 07:45:47 +00003937 /* clear work counters since we have the values we need */
3938 ring_container->total_bytes = 0;
3939 ring_container->total_packets = 0;
3940
3941 /* write updated itr to ring container */
3942 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003943}
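
/* Worked example (illustrative): suppose the ring is currently in
 * low_latency and the last interrupt window saw 40 packets totalling
 * 12000 bytes. bytes > 10000, bytes/packets == 300 so neither TSO
 * branch fires, and packets > 35, so the ring is promoted to
 * lowest_latency. Had it seen 2 packets and 400 bytes instead, the
 * bytes <= 10000 path with packets <= 2 && bytes < 512 would pick
 * lowest_latency as well.
 */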
3944
Alexander Duyck0ba82992011-08-26 07:45:47 +00003945static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003946{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003947 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00003948 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003949 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003950
3951 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3952 if (adapter->link_speed != SPEED_1000) {
3953 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003954 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08003955 goto set_itr_now;
3956 }
3957
Alexander Duyck0ba82992011-08-26 07:45:47 +00003958 igb_update_itr(q_vector, &q_vector->tx);
3959 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08003960
Alexander Duyck0ba82992011-08-26 07:45:47 +00003961 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003962
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003963 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00003964 if (current_itr == lowest_latency &&
3965 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3966 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003967 current_itr = low_latency;
3968
Auke Kok9d5c8242008-01-24 02:22:38 -08003969 switch (current_itr) {
3970 /* counts and packets in update_itr are dependent on these numbers */
3971 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003972 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003973 break;
3974 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003975 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003976 break;
3977 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003978 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003979 break;
3980 default:
3981 break;
3982 }
3983
3984set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00003985 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003986 /* this attempts to bias the interrupt rate towards Bulk
3987 * by adding intermediate steps when interrupt rate is
3988 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003989 new_itr = new_itr > q_vector->itr_val ?
3990 max((new_itr * q_vector->itr_val) /
3991 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00003992 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003993 new_itr;
3994 /* Don't write the value here; it resets the adapter's
3995 * internal timer, and causes us to delay far longer than
3996 * we should between interrupts. Instead, we write the ITR
3997 * value at the beginning of the next interrupt so the timing
3998 * ends up being correct.
3999 */
Alexander Duyck047e0032009-10-27 15:49:27 +00004000 q_vector->itr_val = new_itr;
4001 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004002 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004003}
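
/* Worked example (illustrative, assuming the ~256 ns EITR interval
 * granularity of these adapters): IGB_20K_ITR is 196, so the written
 * interval is roughly 196 * 256 ns ~= 50 us, i.e. at most ~20,000
 * interrupts/sec; IGB_4K_ITR (980) likewise gives ~250 us and
 * IGB_70K_ITR (56) ~14 us.
 */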
4004
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004005void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4006 u32 type_tucmd, u32 mss_l4len_idx)
4007{
4008 struct e1000_adv_tx_context_desc *context_desc;
4009 u16 i = tx_ring->next_to_use;
4010
4011 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4012
4013 i++;
4014 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4015
4016 /* set bits to identify this as an advanced context descriptor */
4017 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4018
4019 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004020 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004021 mss_l4len_idx |= tx_ring->reg_idx << 4;
4022
4023 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4024 context_desc->seqnum_seed = 0;
4025 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4026 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4027}
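
/* Worked example (illustrative): for an untagged TCP/IPv4 frame with a
 * 14-byte Ethernet header and a 20-byte IP header, the callers below
 * hand this function
 *   vlan_macip_lens = 20 | (14 << E1000_ADVTXD_MACLEN_SHIFT) = 0x1c14
 * (assuming the usual shift of 9), with the VLAN tag bits zero; the
 * hardware parses MACLEN and IPLEN back out of that single word.
 */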
4028
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004029static int igb_tso(struct igb_ring *tx_ring,
4030 struct igb_tx_buffer *first,
4031 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004032{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004033 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004034 u32 vlan_macip_lens, type_tucmd;
4035 u32 mss_l4len_idx, l4len;
4036
4037 if (!skb_is_gso(skb))
4038 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004039
4040 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004041 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004042 if (err)
4043 return err;
4044 }
4045
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004046 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4047 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004048
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004049 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004050 struct iphdr *iph = ip_hdr(skb);
4051 iph->tot_len = 0;
4052 iph->check = 0;
4053 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4054 iph->daddr, 0,
4055 IPPROTO_TCP,
4056 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004057 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004058 first->tx_flags |= IGB_TX_FLAGS_TSO |
4059 IGB_TX_FLAGS_CSUM |
4060 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004061 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004062 ipv6_hdr(skb)->payload_len = 0;
4063 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4064 &ipv6_hdr(skb)->daddr,
4065 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004066 first->tx_flags |= IGB_TX_FLAGS_TSO |
4067 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004068 }
4069
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004070 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004071 l4len = tcp_hdrlen(skb);
4072 *hdr_len = skb_transport_offset(skb) + l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08004073
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004074 /* update gso size and bytecount with header size */
4075 first->gso_segs = skb_shinfo(skb)->gso_segs;
4076 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4077
Auke Kok9d5c8242008-01-24 02:22:38 -08004078 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004079 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4080 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004081
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004082 /* VLAN MACLEN IPLEN */
4083 vlan_macip_lens = skb_network_header_len(skb);
4084 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004085 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004086
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004087 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004088
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004089 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004090}
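
/* Worked example (illustrative): for a TSO skb carrying 4 * 1448 bytes
 * of TCP payload behind a 66-byte Ethernet+IP+TCP header, skb->len is
 * 5858 and gso_segs is 4, so the adjustment above yields
 *   bytecount = 5858 + (4 - 1) * 66 = 6056
 * which matches the 4 * 66 + 5792 bytes that actually hit the wire,
 * since every segment repeats the headers.
 */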
4091
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004092static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004093{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004094 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004095 u32 vlan_macip_lens = 0;
4096 u32 mss_l4len_idx = 0;
4097 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004098
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004099 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004100 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4101 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004102 } else {
4103 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004104 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004105 case __constant_htons(ETH_P_IP):
4106 vlan_macip_lens |= skb_network_header_len(skb);
4107 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4108 l4_hdr = ip_hdr(skb)->protocol;
4109 break;
4110 case __constant_htons(ETH_P_IPV6):
4111 vlan_macip_lens |= skb_network_header_len(skb);
4112 l4_hdr = ipv6_hdr(skb)->nexthdr;
4113 break;
4114 default:
4115 if (unlikely(net_ratelimit())) {
4116 dev_warn(tx_ring->dev,
4117 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004118 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004119 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004120 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004121 }
4122
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004123 switch (l4_hdr) {
4124 case IPPROTO_TCP:
4125 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4126 mss_l4len_idx = tcp_hdrlen(skb) <<
4127 E1000_ADVTXD_L4LEN_SHIFT;
4128 break;
4129 case IPPROTO_SCTP:
4130 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4131 mss_l4len_idx = sizeof(struct sctphdr) <<
4132 E1000_ADVTXD_L4LEN_SHIFT;
4133 break;
4134 case IPPROTO_UDP:
4135 mss_l4len_idx = sizeof(struct udphdr) <<
4136 E1000_ADVTXD_L4LEN_SHIFT;
4137 break;
4138 default:
4139 if (unlikely(net_ratelimit())) {
4140 dev_warn(tx_ring->dev,
4141 "partial checksum but l4 proto=%x!\n",
4142 l4_hdr);
4143 }
4144 break;
4145 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004146
4147 /* update TX checksum flag */
4148 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004149 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004150
4151 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004152 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004153
4154 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004155}
4156
Alexander Duycke032afc2011-08-26 07:44:48 +00004157static __le32 igb_tx_cmd_type(u32 tx_flags)
4158{
4159 /* set type for advanced descriptor with frame checksum insertion */
4160 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4161 E1000_ADVTXD_DCMD_IFCS |
4162 E1000_ADVTXD_DCMD_DEXT);
4163
4164 /* set HW vlan bit if vlan is present */
4165 if (tx_flags & IGB_TX_FLAGS_VLAN)
4166 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4167
4168 /* set timestamp bit if present */
4169 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4170 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4171
4172 /* set segmentation bits for TSO */
4173 if (tx_flags & IGB_TX_FLAGS_TSO)
4174 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4175
4176 return cmd_type;
4177}
4178
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004179static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4180 union e1000_adv_tx_desc *tx_desc,
4181 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004182{
4183 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4184
4185 /* 82575 requires a unique index per ring if any offload is enabled */
4186 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
Alexander Duyck866cff02011-08-26 07:45:36 +00004187 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004188 olinfo_status |= tx_ring->reg_idx << 4;
4189
4190 /* insert L4 checksum */
4191 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4192 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4193
4194 /* insert IPv4 checksum */
4195 if (tx_flags & IGB_TX_FLAGS_IPV4)
4196 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4197 }
4198
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004199 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004200}
4201
Alexander Duyckebe42d12011-08-26 07:45:09 +00004202/*
4203 * The largest size we can write to the descriptor is 65535. In order to
4204 * maintain a power of two alignment we have to limit ourselves to 32K.
4205 */
4206#define IGB_MAX_TXD_PWR 15
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004207#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
Auke Kok9d5c8242008-01-24 02:22:38 -08004208
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004209static void igb_tx_map(struct igb_ring *tx_ring,
4210 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004211 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004212{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004213 struct sk_buff *skb = first->skb;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004214 struct igb_tx_buffer *tx_buffer_info;
4215 union e1000_adv_tx_desc *tx_desc;
4216 dma_addr_t dma;
4217 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4218 unsigned int data_len = skb->data_len;
4219 unsigned int size = skb_headlen(skb);
4220 unsigned int paylen = skb->len - hdr_len;
4221 __le32 cmd_type;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004222 u32 tx_flags = first->tx_flags;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004223 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004224
4225 tx_desc = IGB_TX_DESC(tx_ring, i);
4226
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004227 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004228 cmd_type = igb_tx_cmd_type(tx_flags);
4229
4230 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4231 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004232 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004233
Alexander Duyckebe42d12011-08-26 07:45:09 +00004234 /* record length, and DMA address */
4235 first->length = size;
4236 first->dma = dma;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004237 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004238
Alexander Duyckebe42d12011-08-26 07:45:09 +00004239 for (;;) {
4240 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4241 tx_desc->read.cmd_type_len =
4242 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004243
Alexander Duyckebe42d12011-08-26 07:45:09 +00004244 i++;
4245 tx_desc++;
4246 if (i == tx_ring->count) {
4247 tx_desc = IGB_TX_DESC(tx_ring, 0);
4248 i = 0;
4249 }
4250
4251 dma += IGB_MAX_DATA_PER_TXD;
4252 size -= IGB_MAX_DATA_PER_TXD;
4253
4254 tx_desc->read.olinfo_status = 0;
4255 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4256 }
4257
4258 if (likely(!data_len))
4259 break;
4260
4261 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4262
Alexander Duyck65689fe2009-03-20 00:17:43 +00004263 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004264 tx_desc++;
4265 if (i == tx_ring->count) {
4266 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004267 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004268 }
Alexander Duyck65689fe2009-03-20 00:17:43 +00004269
Eric Dumazet9e903e02011-10-18 21:00:24 +00004270 size = skb_frag_size(frag);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004271 data_len -= size;
4272
4273 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4274 size, DMA_TO_DEVICE);
4275 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004276 goto dma_error;
4277
Alexander Duyckebe42d12011-08-26 07:45:09 +00004278 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4279 tx_buffer_info->length = size;
4280 tx_buffer_info->dma = dma;
4281
4282 tx_desc->read.olinfo_status = 0;
4283 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4284
4285 frag++;
Auke Kok9d5c8242008-01-24 02:22:38 -08004286 }
4287
Eric Dumazetbdbc0632012-01-04 20:23:36 +00004288 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4289
Alexander Duyckebe42d12011-08-26 07:45:09 +00004290 /* write last descriptor with RS and EOP bits */
4291 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
4292 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyck8542db02011-08-26 07:44:43 +00004293
4294 /* set the timestamp */
4295 first->time_stamp = jiffies;
4296
Alexander Duyckebe42d12011-08-26 07:45:09 +00004297 /*
4298 * Force memory writes to complete before letting h/w know there
4299 * are new descriptors to fetch. (Only applicable for weak-ordered
4300 * memory model archs, such as IA-64).
4301 *
4302 * We also need this memory barrier to make certain all of the
4303 * status bits have been updated before next_to_watch is written.
4304 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004305 wmb();
4306
Alexander Duyckebe42d12011-08-26 07:45:09 +00004307 /* set next_to_watch value indicating a packet is present */
4308 first->next_to_watch = tx_desc;
4309
4310 i++;
4311 if (i == tx_ring->count)
4312 i = 0;
4313
Auke Kok9d5c8242008-01-24 02:22:38 -08004314 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004315
Alexander Duyckfce99e32009-10-27 15:51:27 +00004316 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004317
Auke Kok9d5c8242008-01-24 02:22:38 -08004318 /* we need this if more than one processor can write to our tail
 4319 * at a time, it synchronizes IO on IA64/Altix systems */
4320 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004321
4322 return;
4323
4324dma_error:
4325 dev_err(tx_ring->dev, "TX DMA map failed\n");
4326
4327 /* clear dma mappings for failed tx_buffer_info map */
4328 for (;;) {
4329 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4330 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4331 if (tx_buffer_info == first)
4332 break;
4333 if (i == 0)
4334 i = tx_ring->count;
4335 i--;
4336 }
4337
4338 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004339}
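
/* Illustrative sketch (hypothetical helper, not in the driver): the
 * number of data descriptors the mapping loop above consumes for one
 * skb -- each buffer is split into IGB_MAX_DATA_PER_TXD (32K) chunks,
 * so a buffer of size S costs DIV_ROUND_UP(S, 32K) descriptors.
 */
static unsigned int igb_count_data_descs(const struct sk_buff *skb)
{
	unsigned int count = DIV_ROUND_UP(skb_headlen(skb),
					  IGB_MAX_DATA_PER_TXD);
	unsigned int f;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[f];

		count += DIV_ROUND_UP(skb_frag_size(frag),
				      IGB_MAX_DATA_PER_TXD);
	}

	return count;
}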
4340
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004341static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004342{
Alexander Duycke694e962009-10-27 15:53:06 +00004343 struct net_device *netdev = tx_ring->netdev;
4344
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004345 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004346
Auke Kok9d5c8242008-01-24 02:22:38 -08004347 /* Herbert's original patch had:
4348 * smp_mb__after_netif_stop_queue();
4349 * but since that doesn't exist yet, just open code it. */
4350 smp_mb();
4351
 4352 /* We need to check again in case another CPU has just
4353 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004354 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004355 return -EBUSY;
4356
4357 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004358 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004359
4360 u64_stats_update_begin(&tx_ring->tx_syncp2);
4361 tx_ring->tx_stats.restart_queue2++;
4362 u64_stats_update_end(&tx_ring->tx_syncp2);
4363
Auke Kok9d5c8242008-01-24 02:22:38 -08004364 return 0;
4365}
4366
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004367static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004368{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004369 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004370 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004371 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004372}
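
/* Usage note (illustrative): the transmit path below reserves
 * skb_shinfo(skb)->nr_frags + 4 descriptors up front -- one per page
 * fragment, one for skb->data, one for an optional context descriptor
 * and two of gap so the tail never touches the head -- and calls
 * igb_maybe_stop_tx() again after queueing so the queue is stopped
 * while there is still room for one more worst-case frame.
 */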
4373
Alexander Duyckcd392f52011-08-26 07:43:59 +00004374netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4375 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004376{
Alexander Duyck8542db02011-08-26 07:44:43 +00004377 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004378 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004379 u32 tx_flags = 0;
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004380 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004381 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004382
Auke Kok9d5c8242008-01-24 02:22:38 -08004383 /* need: 1 descriptor per page,
4384 * + 2 desc gap to keep tail from touching head,
4385 * + 1 desc for skb->data,
4386 * + 1 desc for context descriptor,
4387 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004388 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004389 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004390 return NETDEV_TX_BUSY;
4391 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004392
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004393 /* record the location of the first descriptor for this packet */
4394 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4395 first->skb = skb;
4396 first->bytecount = skb->len;
4397 first->gso_segs = 1;
4398
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004399 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4400 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004401 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004402 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004403
Jesse Grosseab6d182010-10-20 13:56:03 +00004404 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004405 tx_flags |= IGB_TX_FLAGS_VLAN;
4406 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4407 }
4408
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004409 /* record initial flags and protocol */
4410 first->tx_flags = tx_flags;
4411 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004412
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004413 tso = igb_tso(tx_ring, first, &hdr_len);
4414 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004415 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004416 else if (!tso)
4417 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004418
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004419 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004420
4421 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004422 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004423
Auke Kok9d5c8242008-01-24 02:22:38 -08004424 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004425
4426out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004427 igb_unmap_and_free_tx_resource(tx_ring, first);
4428
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004429 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004430}
4431
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004432static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4433 struct sk_buff *skb)
4434{
4435 unsigned int r_idx = skb->queue_mapping;
4436
4437 if (r_idx >= adapter->num_tx_queues)
4438 r_idx = r_idx % adapter->num_tx_queues;
4439
4440 return adapter->tx_ring[r_idx];
4441}
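
/* Worked example (illustrative): with num_tx_queues == 4, an skb whose
 * queue_mapping is 6 (say, picked by the stack on an 8-CPU box) folds
 * to ring 6 % 4 == 2, so out-of-range mappings always land on a valid
 * ring rather than indexing past adapter->tx_ring[].
 */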
4442
Alexander Duyckcd392f52011-08-26 07:43:59 +00004443static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4444 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004445{
4446 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004447
4448 if (test_bit(__IGB_DOWN, &adapter->state)) {
4449 dev_kfree_skb_any(skb);
4450 return NETDEV_TX_OK;
4451 }
4452
4453 if (skb->len <= 0) {
4454 dev_kfree_skb_any(skb);
4455 return NETDEV_TX_OK;
4456 }
4457
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004458 /*
4459 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4460 * in order to meet this minimum size requirement.
4461 */
4462 if (skb->len < 17) {
4463 if (skb_padto(skb, 17))
4464 return NETDEV_TX_OK;
4465 skb->len = 17;
4466 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004467
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004468 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004469}
4470
4471/**
4472 * igb_tx_timeout - Respond to a Tx Hang
4473 * @netdev: network interface device structure
4474 **/
4475static void igb_tx_timeout(struct net_device *netdev)
4476{
4477 struct igb_adapter *adapter = netdev_priv(netdev);
4478 struct e1000_hw *hw = &adapter->hw;
4479
4480 /* Do the reset outside of interrupt context */
4481 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004482
Alexander Duyck06218a82011-08-26 07:46:55 +00004483 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00004484 hw->dev_spec._82575.global_device_reset = true;
4485
Auke Kok9d5c8242008-01-24 02:22:38 -08004486 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004487 wr32(E1000_EICS,
4488 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004489}
4490
4491static void igb_reset_task(struct work_struct *work)
4492{
4493 struct igb_adapter *adapter;
4494 adapter = container_of(work, struct igb_adapter, reset_task);
4495
Taku Izumic97ec422010-04-27 14:39:30 +00004496 igb_dump(adapter);
4497 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004498 igb_reinit_locked(adapter);
4499}
4500
4501/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004502 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004503 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004504 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004505 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004506 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004507static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4508 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004509{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004510 struct igb_adapter *adapter = netdev_priv(netdev);
4511
4512 spin_lock(&adapter->stats64_lock);
4513 igb_update_stats(adapter, &adapter->stats64);
4514 memcpy(stats, &adapter->stats64, sizeof(*stats));
4515 spin_unlock(&adapter->stats64_lock);
4516
4517 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004518}
4519
4520/**
4521 * igb_change_mtu - Change the Maximum Transfer Unit
4522 * @netdev: network interface device structure
4523 * @new_mtu: new value for maximum frame size
4524 *
4525 * Returns 0 on success, negative on failure
4526 **/
4527static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4528{
4529 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004530 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004531 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004532
Alexander Duyckc809d222009-10-27 23:52:13 +00004533 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004534 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004535 return -EINVAL;
4536 }
4537
Alexander Duyck153285f2011-08-26 07:43:32 +00004538#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004539 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004540 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004541 return -EINVAL;
4542 }
4543
4544 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4545 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004546
Auke Kok9d5c8242008-01-24 02:22:38 -08004547 /* igb_down has a dependency on max_frame_size */
4548 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004549
Alexander Duyck4c844852009-10-27 15:52:07 +00004550 if (netif_running(netdev))
4551 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004552
Alexander Duyck090b1792009-10-27 23:51:55 +00004553 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004554 netdev->mtu, new_mtu);
4555 netdev->mtu = new_mtu;
4556
4557 if (netif_running(netdev))
4558 igb_up(adapter);
4559 else
4560 igb_reset(adapter);
4561
4562 clear_bit(__IGB_RESETTING, &adapter->state);
4563
4564 return 0;
4565}
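
/* Worked example (illustrative): max_frame = new_mtu + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4). A standard 1500-byte MTU therefore
 * maps to a 1522-byte max_frame, and the 9238-byte
 * MAX_STD_JUMBO_FRAME_SIZE ceiling corresponds to the 9216-byte MTU
 * named in the error message above.
 */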
4566
4567/**
4568 * igb_update_stats - Update the board statistics counters
4569 * @adapter: board private structure
4570 **/
4571
Eric Dumazet12dcd862010-10-15 17:27:10 +00004572void igb_update_stats(struct igb_adapter *adapter,
4573 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004574{
4575 struct e1000_hw *hw = &adapter->hw;
4576 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004577 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004578 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004579 int i;
4580 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004581 unsigned int start;
4582 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004583
4584#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4585
4586 /*
4587 * Prevent stats update while adapter is being reset, or if the pci
4588 * connection is down.
4589 */
4590 if (adapter->link_speed == 0)
4591 return;
4592 if (pci_channel_offline(pdev))
4593 return;
4594
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004595 bytes = 0;
4596 packets = 0;
4597 for (i = 0; i < adapter->num_rx_queues; i++) {
4598 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00004599 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004600
Alexander Duyck3025a442010-02-17 01:02:39 +00004601 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004602 net_stats->rx_fifo_errors += rqdpc_tmp;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004603
4604 do {
4605 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4606 _bytes = ring->rx_stats.bytes;
4607 _packets = ring->rx_stats.packets;
4608 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4609 bytes += _bytes;
4610 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004611 }
4612
Alexander Duyck128e45e2009-11-12 18:37:38 +00004613 net_stats->rx_bytes = bytes;
4614 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004615
4616 bytes = 0;
4617 packets = 0;
4618 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004619 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004620 do {
4621 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4622 _bytes = ring->tx_stats.bytes;
4623 _packets = ring->tx_stats.packets;
4624 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4625 bytes += _bytes;
4626 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004627 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004628 net_stats->tx_bytes = bytes;
4629 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004630
4631 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004632 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4633 adapter->stats.gprc += rd32(E1000_GPRC);
4634 adapter->stats.gorc += rd32(E1000_GORCL);
4635 rd32(E1000_GORCH); /* clear GORCL */
4636 adapter->stats.bprc += rd32(E1000_BPRC);
4637 adapter->stats.mprc += rd32(E1000_MPRC);
4638 adapter->stats.roc += rd32(E1000_ROC);
4639
4640 adapter->stats.prc64 += rd32(E1000_PRC64);
4641 adapter->stats.prc127 += rd32(E1000_PRC127);
4642 adapter->stats.prc255 += rd32(E1000_PRC255);
4643 adapter->stats.prc511 += rd32(E1000_PRC511);
4644 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4645 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4646 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4647 adapter->stats.sec += rd32(E1000_SEC);
4648
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004649 mpc = rd32(E1000_MPC);
4650 adapter->stats.mpc += mpc;
4651 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004652 adapter->stats.scc += rd32(E1000_SCC);
4653 adapter->stats.ecol += rd32(E1000_ECOL);
4654 adapter->stats.mcc += rd32(E1000_MCC);
4655 adapter->stats.latecol += rd32(E1000_LATECOL);
4656 adapter->stats.dc += rd32(E1000_DC);
4657 adapter->stats.rlec += rd32(E1000_RLEC);
4658 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4659 adapter->stats.xontxc += rd32(E1000_XONTXC);
4660 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4661 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4662 adapter->stats.fcruc += rd32(E1000_FCRUC);
4663 adapter->stats.gptc += rd32(E1000_GPTC);
4664 adapter->stats.gotc += rd32(E1000_GOTCL);
4665 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004666 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004667 adapter->stats.ruc += rd32(E1000_RUC);
4668 adapter->stats.rfc += rd32(E1000_RFC);
4669 adapter->stats.rjc += rd32(E1000_RJC);
4670 adapter->stats.tor += rd32(E1000_TORH);
4671 adapter->stats.tot += rd32(E1000_TOTH);
4672 adapter->stats.tpr += rd32(E1000_TPR);
4673
4674 adapter->stats.ptc64 += rd32(E1000_PTC64);
4675 adapter->stats.ptc127 += rd32(E1000_PTC127);
4676 adapter->stats.ptc255 += rd32(E1000_PTC255);
4677 adapter->stats.ptc511 += rd32(E1000_PTC511);
4678 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4679 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4680
4681 adapter->stats.mptc += rd32(E1000_MPTC);
4682 adapter->stats.bptc += rd32(E1000_BPTC);
4683
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004684 adapter->stats.tpt += rd32(E1000_TPT);
4685 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004686
4687 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004688 /* read internal phy specific stats */
4689 reg = rd32(E1000_CTRL_EXT);
4690 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4691 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4692 adapter->stats.tncrs += rd32(E1000_TNCRS);
4693 }
4694
Auke Kok9d5c8242008-01-24 02:22:38 -08004695 adapter->stats.tsctc += rd32(E1000_TSCTC);
4696 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4697
4698 adapter->stats.iac += rd32(E1000_IAC);
4699 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4700 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4701 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4702 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4703 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4704 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4705 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4706 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4707
4708 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004709 net_stats->multicast = adapter->stats.mprc;
4710 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004711
4712 /* Rx Errors */
4713
4714 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004715 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004716 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004717 adapter->stats.crcerrs + adapter->stats.algnerrc +
4718 adapter->stats.ruc + adapter->stats.roc +
4719 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004720 net_stats->rx_length_errors = adapter->stats.ruc +
4721 adapter->stats.roc;
4722 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4723 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4724 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004725
4726 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004727 net_stats->tx_errors = adapter->stats.ecol +
4728 adapter->stats.latecol;
4729 net_stats->tx_aborted_errors = adapter->stats.ecol;
4730 net_stats->tx_window_errors = adapter->stats.latecol;
4731 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004732
4733 /* Tx Dropped needs to be maintained elsewhere */
4734
4735 /* Phy Stats */
4736 if (hw->phy.media_type == e1000_media_type_copper) {
4737 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004738 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004739 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4740 adapter->phy_stats.idle_errors += phy_tmp;
4741 }
4742 }
4743
4744 /* Management Stats */
4745 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4746 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4747 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004748
4749 /* OS2BMC Stats */
4750 reg = rd32(E1000_MANC);
4751 if (reg & E1000_MANC_EN_BMC2OS) {
4752 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4753 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4754 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4755 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4756 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004757}
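
/* Illustrative sketch (hypothetical helper): the u64_stats fetch loop
 * used above, in isolation. On 32-bit SMP kernels the seqcount retry
 * is what guarantees an untorn 64-bit read of the per-ring counters;
 * on 64-bit builds it compiles down to plain loads.
 */
static u64 igb_ring_rx_bytes(struct igb_ring *ring)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
		bytes = ring->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));

	return bytes;
}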
4758
Auke Kok9d5c8242008-01-24 02:22:38 -08004759static irqreturn_t igb_msix_other(int irq, void *data)
4760{
Alexander Duyck047e0032009-10-27 15:49:27 +00004761 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004762 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004763 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004764 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004765
Alexander Duyck7f081d42010-01-07 17:41:00 +00004766 if (icr & E1000_ICR_DRSTA)
4767 schedule_work(&adapter->reset_task);
4768
Alexander Duyck047e0032009-10-27 15:49:27 +00004769 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004770 /* HW is reporting DMA is out of sync */
4771 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004772 /* The DMA Out of Sync is also an indication of a spoof event
4773 * in IOV mode. Check the Wrong VM Behavior register to
4774 * see if it is really a spoof event. */
4775 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004776 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004777
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004778 /* Check for a mailbox event */
4779 if (icr & E1000_ICR_VMMB)
4780 igb_msg_task(adapter);
4781
4782 if (icr & E1000_ICR_LSC) {
4783 hw->mac.get_link_status = 1;
4784 /* guard against interrupt when we're going down */
4785 if (!test_bit(__IGB_DOWN, &adapter->state))
4786 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4787 }
4788
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004789 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004790
4791 return IRQ_HANDLED;
4792}
4793
Alexander Duyck047e0032009-10-27 15:49:27 +00004794static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004795{
Alexander Duyck26b39272010-02-17 01:00:41 +00004796 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004797 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004798
Alexander Duyck047e0032009-10-27 15:49:27 +00004799 if (!q_vector->set_itr)
4800 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004801
Alexander Duyck047e0032009-10-27 15:49:27 +00004802 if (!itr_val)
4803 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004804
Alexander Duyck26b39272010-02-17 01:00:41 +00004805 if (adapter->hw.mac.type == e1000_82575)
4806 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004807 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004808 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004809
4810 writel(itr_val, q_vector->itr_register);
4811 q_vector->set_itr = 0;
4812}
4813
4814static irqreturn_t igb_msix_ring(int irq, void *data)
4815{
4816 struct igb_q_vector *q_vector = data;
4817
4818 /* Write the ITR value calculated from the previous interrupt. */
4819 igb_write_itr(q_vector);
4820
4821 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004822
Auke Kok9d5c8242008-01-24 02:22:38 -08004823 return IRQ_HANDLED;
4824}
4825
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004826#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004827static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004828{
Alexander Duyck047e0032009-10-27 15:49:27 +00004829 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004830 struct e1000_hw *hw = &adapter->hw;
4831 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004832
Alexander Duyck047e0032009-10-27 15:49:27 +00004833 if (q_vector->cpu == cpu)
4834 goto out_no_update;
4835
Alexander Duyck0ba82992011-08-26 07:45:47 +00004836 if (q_vector->tx.ring) {
4837 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004838 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4839 if (hw->mac.type == e1000_82575) {
4840 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4841 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4842 } else {
4843 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4844 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4845 E1000_DCA_TXCTRL_CPUID_SHIFT;
4846 }
4847 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4848 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4849 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004850 if (q_vector->rx.ring) {
4851 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004852 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4853 if (hw->mac.type == e1000_82575) {
4854 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4855 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4856 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004857 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004858 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004859 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004860 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004861 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4862 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4863 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4864 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004865 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004866 q_vector->cpu = cpu;
4867out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004868 put_cpu();
4869}
4870
4871static void igb_setup_dca(struct igb_adapter *adapter)
4872{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004873 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004874 int i;
4875
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004876 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004877 return;
4878
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004879 /* Always use CB2 mode, difference is masked in the CB driver. */
4880 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4881
Alexander Duyck047e0032009-10-27 15:49:27 +00004882 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004883 adapter->q_vector[i]->cpu = -1;
4884 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004885 }
4886}
4887
4888static int __igb_notify_dca(struct device *dev, void *data)
4889{
4890 struct net_device *netdev = dev_get_drvdata(dev);
4891 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004892 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004893 struct e1000_hw *hw = &adapter->hw;
4894 unsigned long event = *(unsigned long *)data;
4895
4896 switch (event) {
4897 case DCA_PROVIDER_ADD:
4898 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004899 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004900 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004901 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004902 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004903 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004904 igb_setup_dca(adapter);
4905 break;
4906 }
4907 /* Fall Through since DCA is disabled. */
4908 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004909 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004910 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004911 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004912 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004913 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004914 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004915 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004916 }
4917 break;
4918 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004919
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004920 return 0;
4921}
4922
4923static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4924 void *p)
4925{
4926 int ret_val;
4927
4928 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4929 __igb_notify_dca);
4930
4931 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4932}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004933#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004934
Greg Rose0224d662011-10-14 02:57:14 +00004935#ifdef CONFIG_PCI_IOV
4936static int igb_vf_configure(struct igb_adapter *adapter, int vf)
4937{
4938 unsigned char mac_addr[ETH_ALEN];
4939 struct pci_dev *pdev = adapter->pdev;
4940 struct e1000_hw *hw = &adapter->hw;
4941 struct pci_dev *pvfdev;
4942 unsigned int device_id;
4943 u16 thisvf_devfn;
4944
4945 random_ether_addr(mac_addr);
4946 igb_set_vf_mac(adapter, vf, mac_addr);
4947
4948 switch (adapter->hw.mac.type) {
4949 case e1000_82576:
4950 device_id = IGB_82576_VF_DEV_ID;
4951 /* VF Stride for 82576 is 2 */
4952 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
4953 (pdev->devfn & 1);
4954 break;
4955 case e1000_i350:
4956 device_id = IGB_I350_VF_DEV_ID;
4957 /* VF Stride for I350 is 4 */
4958 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
4959 (pdev->devfn & 3);
4960 break;
4961 default:
4962 device_id = 0;
4963 thisvf_devfn = 0;
4964 break;
4965 }
4966
4967 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
4968 while (pvfdev) {
4969 if (pvfdev->devfn == thisvf_devfn)
4970 break;
4971 pvfdev = pci_get_device(hw->vendor_id,
4972 device_id, pvfdev);
4973 }
4974
4975 if (pvfdev)
4976 adapter->vf_data[vf].vfdev = pvfdev;
4977 else
4978 dev_err(&pdev->dev,
4979 "Couldn't find pci dev ptr for VF %4.4x\n",
4980 thisvf_devfn);
4981 return pvfdev != NULL;
4982}
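
/* Worked example (illustrative): for an 82576 PF at devfn 0x00, VF 3
 * is expected at devfn 0x00 + 0x80 + (3 << 1) = 0x86 (stride 2); on
 * i350 the stride-4 layout puts the same VF at 0x80 + (3 << 2) = 0x8c.
 * The pci_get_device() walk above then matches that devfn to find the
 * VF's struct pci_dev.
 */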
4983
4984static int igb_find_enabled_vfs(struct igb_adapter *adapter)
4985{
4986 struct e1000_hw *hw = &adapter->hw;
4987 struct pci_dev *pdev = adapter->pdev;
4988 struct pci_dev *pvfdev;
4989 u16 vf_devfn = 0;
4990 u16 vf_stride;
4991 unsigned int device_id;
4992 int vfs_found = 0;
4993
4994 switch (adapter->hw.mac.type) {
4995 case e1000_82576:
4996 device_id = IGB_82576_VF_DEV_ID;
4997 /* VF Stride for 82576 is 2 */
4998 vf_stride = 2;
4999 break;
5000 case e1000_i350:
5001 device_id = IGB_I350_VF_DEV_ID;
5002 /* VF Stride for I350 is 4 */
5003 vf_stride = 4;
5004 break;
5005 default:
5006 device_id = 0;
5007 vf_stride = 0;
5008 break;
5009 }
5010
5011 vf_devfn = pdev->devfn + 0x80;
5012 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5013 while (pvfdev) {
5014 if (pvfdev->devfn == vf_devfn)
5015 vfs_found++;
5016 vf_devfn += vf_stride;
5017 pvfdev = pci_get_device(hw->vendor_id,
5018 device_id, pvfdev);
5019 }
5020
5021 return vfs_found;
5022}
5023
5024static int igb_check_vf_assignment(struct igb_adapter *adapter)
5025{
5026 int i;
5027 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5028 if (adapter->vf_data[i].vfdev) {
5029 if (adapter->vf_data[i].vfdev->dev_flags &
5030 PCI_DEV_FLAGS_ASSIGNED)
5031 return true;
5032 }
5033 }
5034 return false;
5035}
5036
5037#endif
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005038static void igb_ping_all_vfs(struct igb_adapter *adapter)
5039{
5040 struct e1000_hw *hw = &adapter->hw;
5041 u32 ping;
5042 int i;
5043
5044 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5045 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005046 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005047 ping |= E1000_VT_MSGTYPE_CTS;
5048 igb_write_mbx(hw, &ping, 1, i);
5049 }
5050}
5051
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005052static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5053{
5054 struct e1000_hw *hw = &adapter->hw;
5055 u32 vmolr = rd32(E1000_VMOLR(vf));
5056 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5057
Alexander Duyckd85b90042010-09-22 17:56:20 +00005058 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005059 IGB_VF_FLAG_MULTI_PROMISC);
5060 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5061
5062 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5063 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00005064 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005065 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5066 } else {
5067 /*
5068 * if we have hashes and we are clearing a multicast promisc
5069 * flag we need to write the hashes to the MTA as this step
5070 * was previously skipped
5071 */
5072 if (vf_data->num_vf_mc_hashes > 30) {
5073 vmolr |= E1000_VMOLR_MPME;
5074 } else if (vf_data->num_vf_mc_hashes) {
5075 int j;
5076 vmolr |= E1000_VMOLR_ROMPE;
5077 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5078 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5079 }
5080 }
5081
5082 wr32(E1000_VMOLR(vf), vmolr);
5083
5084 /* there are flags left unprocessed, likely not supported */
5085 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5086 return -EINVAL;
5087
5088 return 0;
5089
5090}
5091
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005092static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5093 u32 *msgbuf, u32 vf)
5094{
5095 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5096 u16 *hash_list = (u16 *)&msgbuf[1];
5097 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5098 int i;
5099
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005100 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005101 * to this VF for later use to restore when the PF multicast
5102 * list changes
5103 */
5104 vf_data->num_vf_mc_hashes = n;
5105
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005106 /* only up to 30 hash values supported */
5107 if (n > 30)
5108 n = 30;
5109
5110 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005111 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005112 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005113
5114 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005115 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005116
5117 return 0;
5118}
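
/*
 * Mailbox layout sketch (illustrative, derived from the parsing
 * above): a VF registering three multicast addresses would send
 *
 *	msgbuf[0] = E1000_VF_SET_MULTICAST | (3 << E1000_VT_MSGINFO_SHIFT);
 *	msgbuf[1..] = the packed u16 hash values
 *
 * which is why the count is recovered from the MSGINFO field and the
 * hashes are read starting at msgbuf[1].
 */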
5119
5120static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5121{
5122 struct e1000_hw *hw = &adapter->hw;
5123 struct vf_data_storage *vf_data;
5124 int i, j;
5125
5126 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005127 u32 vmolr = rd32(E1000_VMOLR(i));
5128 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5129
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005130 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005131
5132 if ((vf_data->num_vf_mc_hashes > 30) ||
5133 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5134 vmolr |= E1000_VMOLR_MPME;
5135 } else if (vf_data->num_vf_mc_hashes) {
5136 vmolr |= E1000_VMOLR_ROMPE;
5137 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5138 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5139 }
5140 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005141 }
5142}
5143
5144static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5145{
5146 struct e1000_hw *hw = &adapter->hw;
5147 u32 pool_mask, reg, vid;
5148 int i;
5149
5150 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5151
5152 /* Find the vlan filter for this id */
5153 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5154 reg = rd32(E1000_VLVF(i));
5155
5156 /* remove the vf from the pool */
5157 reg &= ~pool_mask;
5158
5159 /* if pool is empty then remove entry from vfta */
5160 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5161 (reg & E1000_VLVF_VLANID_ENABLE)) {
5162 vid = reg & E1000_VLVF_VLANID_MASK; /* read vid before clearing */
5163 igb_vfta_set(hw, vid, false);
5164 reg = 0;
5165 }
5166
5167 wr32(E1000_VLVF(i), reg);
5168 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005169
5170 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005171}
5172
5173static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5174{
5175 struct e1000_hw *hw = &adapter->hw;
5176 u32 reg, i;
5177
Alexander Duyck51466232009-10-27 23:47:35 +00005178 /* The vlvf table only exists on 82576 hardware and newer */
5179 if (hw->mac.type < e1000_82576)
5180 return -1;
5181
5182 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005183 if (!adapter->vfs_allocated_count)
5184 return -1;
5185
5186 /* Find the vlan filter for this id */
5187 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5188 reg = rd32(E1000_VLVF(i));
5189 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5190 vid == (reg & E1000_VLVF_VLANID_MASK))
5191 break;
5192 }
5193
5194 if (add) {
5195 if (i == E1000_VLVF_ARRAY_SIZE) {
5196 /* Did not find a matching VLAN ID entry that was
5197 * enabled. Search for a free filter entry, i.e.
5198 * one without the enable bit set
5199 */
5200 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5201 reg = rd32(E1000_VLVF(i));
5202 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5203 break;
5204 }
5205 }
5206 if (i < E1000_VLVF_ARRAY_SIZE) {
5207 /* Found an enabled/available entry */
5208 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5209
5210 /* if !enabled we need to set this up in vfta */
5211 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005212 /* add VID to filter table */
5213 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005214 reg |= E1000_VLVF_VLANID_ENABLE;
5215 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005216 reg &= ~E1000_VLVF_VLANID_MASK;
5217 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005218 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005219
5220 /* do not modify RLPML for PF devices */
5221 if (vf >= adapter->vfs_allocated_count)
5222 return 0;
5223
5224 if (!adapter->vf_data[vf].vlans_enabled) {
5225 u32 size;
5226 reg = rd32(E1000_VMOLR(vf));
5227 size = reg & E1000_VMOLR_RLPML_MASK;
5228 size += 4;
5229 reg &= ~E1000_VMOLR_RLPML_MASK;
5230 reg |= size;
5231 wr32(E1000_VMOLR(vf), reg);
5232 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005233
Alexander Duyck51466232009-10-27 23:47:35 +00005234 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005235 }
5236 } else {
5237 if (i < E1000_VLVF_ARRAY_SIZE) {
5238 /* remove vf from the pool */
5239 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5240 /* if pool is empty then remove entry from vfta */
5241 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5242 reg = 0;
5243 igb_vfta_set(hw, vid, false);
5244 }
5245 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005246
5247 /* do not modify RLPML for PF devices */
5248 if (vf >= adapter->vfs_allocated_count)
5249 return 0;
5250
5251 adapter->vf_data[vf].vlans_enabled--;
5252 if (!adapter->vf_data[vf].vlans_enabled) {
5253 u32 size;
5254 reg = rd32(E1000_VMOLR(vf));
5255 size = reg & E1000_VMOLR_RLPML_MASK;
5256 size -= 4;
5257 reg &= ~E1000_VMOLR_RLPML_MASK;
5258 reg |= size;
5259 wr32(E1000_VMOLR(vf), reg);
5260 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005261 }
5262 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005263 return 0;
5264}
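
/*
 * Worked example (illustrative numbers): if VMOLR.RLPML starts at
 * 1522, enabling the first VLAN filter for a VF raises it to 1526 so
 * a tagged maximum-size frame still fits; removing the last filter
 * takes the 4 bytes of VLAN tag back off.
 */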
5265
5266static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5267{
5268 struct e1000_hw *hw = &adapter->hw;
5269
5270 if (vid)
5271 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5272 else
5273 wr32(E1000_VMVIR(vf), 0);
5274}
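
/*
 * Note (assumption based on the register usage above): with
 * E1000_VMVIR_VLANA_DEFAULT set the hardware inserts @vid as the
 * default tag on the VF's transmitted frames; writing 0 disables the
 * default-tag insertion again.
 */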
5275
5276static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5277 int vf, u16 vlan, u8 qos)
5278{
5279 int err = 0;
5280 struct igb_adapter *adapter = netdev_priv(netdev);
5281
5282 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5283 return -EINVAL;
5284 if (vlan || qos) {
5285 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5286 if (err)
5287 goto out;
5288 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5289 igb_set_vmolr(adapter, vf, !vlan);
5290 adapter->vf_data[vf].pf_vlan = vlan;
5291 adapter->vf_data[vf].pf_qos = qos;
5292 dev_info(&adapter->pdev->dev,
5293 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5294 if (test_bit(__IGB_DOWN, &adapter->state)) {
5295 dev_warn(&adapter->pdev->dev,
5296 "The VF VLAN has been set,"
5297 " but the PF device is not up.\n");
5298 dev_warn(&adapter->pdev->dev,
5299 "Bring the PF device up before"
5300 " attempting to use the VF device.\n");
5301 }
5302 } else {
5303 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5304 false, vf);
5305 igb_set_vmvir(adapter, vlan, vf);
5306 igb_set_vmolr(adapter, vf, true);
5307 adapter->vf_data[vf].pf_vlan = 0;
5308 adapter->vf_data[vf].pf_qos = 0;
5309 }
5310out:
5311 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005312}
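
/*
 * Usage sketch (assumes the iproute2 "ip" tool): this ndo backs
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *
 * which arrives here as vf = 0, vlan = 100, qos = 3; "vlan 0 qos 0"
 * clears the administrative VLAN again via the else branch.
 */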
5313
5314static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5315{
5316 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5317 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5318
5319 return igb_vlvf_set(adapter, vid, add, vf);
5320}
5321
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005322static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005323{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005324 /* clear flags - except flag that indicates PF has set the MAC */
5325 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005326 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005327
5328 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005329 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005330
5331 /* reset vlans for device */
5332 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005333 if (adapter->vf_data[vf].pf_vlan)
5334 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5335 adapter->vf_data[vf].pf_vlan,
5336 adapter->vf_data[vf].pf_qos);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005339
5340 /* reset multicast table array for vf */
5341 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5342
5343 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005344 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005345}
5346
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005347static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5348{
5349 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5350
5351 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005352 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5353 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005354
5355 /* process remaining reset events */
5356 igb_vf_reset(adapter, vf);
5357}
5358
5359static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005360{
5361 struct e1000_hw *hw = &adapter->hw;
5362 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005363 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005364 u32 reg, msgbuf[3];
5365 u8 *addr = (u8 *)(&msgbuf[1]);
5366
5367 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005368 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005369
5370 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005371 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005372
5373 /* enable transmit and receive for vf */
5374 reg = rd32(E1000_VFTE);
5375 wr32(E1000_VFTE, reg | (1 << vf));
5376 reg = rd32(E1000_VFRE);
5377 wr32(E1000_VFRE, reg | (1 << vf));
5378
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005379 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005380
5381 /* reply to reset with ack and vf mac address */
5382 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5383 memcpy(addr, vf_mac, 6);
5384 igb_write_mbx(hw, msgbuf, 3, vf);
5385}
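
/*
 * Reply layout for reference (derived from the code above): word 0
 * carries E1000_VF_RESET | E1000_VT_MSGTYPE_ACK and words 1-2 hold
 * the six MAC address bytes, letting the VF learn its address as part
 * of the reset handshake.
 */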
5386
5387static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5388{
Greg Rosede42edd2010-07-01 13:39:23 +00005389 /*
5390 * The VF MAC Address is stored in a packed array of bytes
5391 * starting at the second 32 bit word of the msg array
5392 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005393 unsigned char *addr = (char *)&msg[1];
5394 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005395
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005396 if (is_valid_ether_addr(addr))
5397 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005398
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005399 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005400}
5401
5402static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5403{
5404 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005405 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005406 u32 msg = E1000_VT_MSGTYPE_NACK;
5407
5408 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005409 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5410 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005411 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005412 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005413 }
5414}
5415
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005416static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005417{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005418 struct pci_dev *pdev = adapter->pdev;
5419 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005420 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005421 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005422 s32 retval;
5423
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005424 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005425
Alexander Duyckfef45f42009-12-11 22:57:34 -08005426 if (retval) {
5427 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005428 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005429 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5430 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5431 return;
5432 goto out;
5433 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005434
5435 /* this is a message we already processed, do nothing */
5436 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005437 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005438
5439 /*
5440 * until the vf completes a reset it should not be
5441 * allowed to start any configuration.
5442 */
5443
5444 if (msgbuf[0] == E1000_VF_RESET) {
5445 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005446 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005447 }
5448
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005449 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005450 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5451 return;
5452 retval = -1;
5453 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005454 }
5455
5456 switch (msgbuf[0] & 0xFFFF) {
5457 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005458 retval = -EINVAL;
5459 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5460 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5461 else
5462 dev_warn(&pdev->dev,
5463 "VF %d attempted to override administratively "
5464 "set MAC address\nReload the VF driver to "
5465 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005466 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005467 case E1000_VF_SET_PROMISC:
5468 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5469 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005470 case E1000_VF_SET_MULTICAST:
5471 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5472 break;
5473 case E1000_VF_SET_LPE:
5474 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5475 break;
5476 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005477 retval = -1;
5478 if (vf_data->pf_vlan)
5479 dev_warn(&pdev->dev,
5480 "VF %d attempted to override administratively "
5481 "set VLAN tag\nReload the VF driver to "
5482 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005483 else
5484 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005485 break;
5486 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005487 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005488 retval = -1;
5489 break;
5490 }
5491
Alexander Duyckfef45f42009-12-11 22:57:34 -08005492 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5493out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005494 /* notify the VF of the results of what it sent us */
5495 if (retval)
5496 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5497 else
5498 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5499
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005500 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005501}
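
/*
 * CTS handshake summary (of the logic above): until a VF completes
 * E1000_VF_RESET, IGB_VF_FLAG_CTS stays clear and any other request
 * is NACKed; after that, replies carry E1000_VT_MSGTYPE_CTS so the VF
 * knows it may proceed with configuration.
 */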
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005502
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005503static void igb_msg_task(struct igb_adapter *adapter)
5504{
5505 struct e1000_hw *hw = &adapter->hw;
5506 u32 vf;
5507
5508 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5509 /* process any reset requests */
5510 if (!igb_check_for_rst(hw, vf))
5511 igb_vf_reset_event(adapter, vf);
5512
5513 /* process any messages pending */
5514 if (!igb_check_for_msg(hw, vf))
5515 igb_rcv_msg_from_vf(adapter, vf);
5516
5517 /* process any acks */
5518 if (!igb_check_for_ack(hw, vf))
5519 igb_rcv_ack_from_vf(adapter, vf);
5520 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005521}
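
/*
 * Note (assumption about the surrounding driver, not shown in this
 * excerpt): igb_msg_task() is invoked when the mailbox interrupt
 * cause fires, draining reset requests, messages and acks for every
 * VF in a single pass.
 */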
5522
Auke Kok9d5c8242008-01-24 02:22:38 -08005523/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005524 * igb_set_uta - Set unicast filter table address
5525 * @adapter: board private structure
5526 *
5527 * The unicast table address is a register array of 32-bit registers.
5528 * The table is meant to be used in a way similar to how the MTA is used
5529 * however due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005530 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5531 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005532 **/
5533static void igb_set_uta(struct igb_adapter *adapter)
5534{
5535 struct e1000_hw *hw = &adapter->hw;
5536 int i;
5537
5538 /* The UTA table only exists on 82576 hardware and newer */
5539 if (hw->mac.type < e1000_82576)
5540 return;
5541
5542 /* we only need to do this if VMDq is enabled */
5543 if (!adapter->vfs_allocated_count)
5544 return;
5545
5546 for (i = 0; i < hw->mac.uta_reg_count; i++)
5547 array_wr32(E1000_UTA, i, ~0);
5548}
5549
5550/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005551 * igb_intr_msi - Interrupt Handler
5552 * @irq: interrupt number
5553 * @data: pointer to a network interface device structure
5554 **/
5555static irqreturn_t igb_intr_msi(int irq, void *data)
5556{
Alexander Duyck047e0032009-10-27 15:49:27 +00005557 struct igb_adapter *adapter = data;
5558 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005559 struct e1000_hw *hw = &adapter->hw;
5560 /* read ICR disables interrupts using IAM */
5561 u32 icr = rd32(E1000_ICR);
5562
Alexander Duyck047e0032009-10-27 15:49:27 +00005563 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005564
Alexander Duyck7f081d42010-01-07 17:41:00 +00005565 if (icr & E1000_ICR_DRSTA)
5566 schedule_work(&adapter->reset_task);
5567
Alexander Duyck047e0032009-10-27 15:49:27 +00005568 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005569 /* HW is reporting DMA is out of sync */
5570 adapter->stats.doosync++;
5571 }
5572
Auke Kok9d5c8242008-01-24 02:22:38 -08005573 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5574 hw->mac.get_link_status = 1;
5575 if (!test_bit(__IGB_DOWN, &adapter->state))
5576 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5577 }
5578
Alexander Duyck047e0032009-10-27 15:49:27 +00005579 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005580
5581 return IRQ_HANDLED;
5582}
5583
5584/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005585 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005586 * @irq: interrupt number
5587 * @data: pointer to a network interface device structure
5588 **/
5589static irqreturn_t igb_intr(int irq, void *data)
5590{
Alexander Duyck047e0032009-10-27 15:49:27 +00005591 struct igb_adapter *adapter = data;
5592 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005593 struct e1000_hw *hw = &adapter->hw;
5594 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5595 * need for the IMC write */
5596 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005597
5598 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5599 * not set, then the adapter didn't send an interrupt */
5600 if (!(icr & E1000_ICR_INT_ASSERTED))
5601 return IRQ_NONE;
5602
Alexander Duyck0ba82992011-08-26 07:45:47 +00005603 igb_write_itr(q_vector);
5604
Alexander Duyck7f081d42010-01-07 17:41:00 +00005605 if (icr & E1000_ICR_DRSTA)
5606 schedule_work(&adapter->reset_task);
5607
Alexander Duyck047e0032009-10-27 15:49:27 +00005608 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005609 /* HW is reporting DMA is out of sync */
5610 adapter->stats.doosync++;
5611 }
5612
Auke Kok9d5c8242008-01-24 02:22:38 -08005613 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5614 hw->mac.get_link_status = 1;
5615 /* guard against interrupt when we're going down */
5616 if (!test_bit(__IGB_DOWN, &adapter->state))
5617 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5618 }
5619
Alexander Duyck047e0032009-10-27 15:49:27 +00005620 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005621
5622 return IRQ_HANDLED;
5623}
5624
Alexander Duyck0ba82992011-08-26 07:45:47 +00005625void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005626{
Alexander Duyck047e0032009-10-27 15:49:27 +00005627 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005628 struct e1000_hw *hw = &adapter->hw;
5629
Alexander Duyck0ba82992011-08-26 07:45:47 +00005630 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5631 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5632 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5633 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005634 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005635 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005636 }
5637
5638 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5639 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005640 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005641 else
5642 igb_irq_enable(adapter);
5643 }
5644}
5645
Auke Kok9d5c8242008-01-24 02:22:38 -08005646/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005647 * igb_poll - NAPI Rx polling callback
5648 * @napi: napi polling structure
5649 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005650 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005651static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005652{
Alexander Duyck047e0032009-10-27 15:49:27 +00005653 struct igb_q_vector *q_vector = container_of(napi,
5654 struct igb_q_vector,
5655 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005656 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005657
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005658#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005659 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5660 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005661#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005662 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005663 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005664
Alexander Duyck0ba82992011-08-26 07:45:47 +00005665 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005666 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005667
Alexander Duyck16eb8812011-08-26 07:43:54 +00005668 /* If all work not completed, return budget and keep polling */
5669 if (!clean_complete)
5670 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005671
Alexander Duyck46544252009-02-19 20:39:04 -08005672 /* all work done; exit polling mode and re-enable the interrupt */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005673 napi_complete(napi);
5674 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005675
Alexander Duyck16eb8812011-08-26 07:43:54 +00005676 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005677}
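
/*
 * NAPI contract sketch: returning the full budget keeps the ring in
 * polling mode, while completing under budget (returning 0 here after
 * napi_complete()) re-arms the interrupt; the !clean_complete early
 * return above is what implements the first half.
 */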
Al Viro6d8126f2008-03-16 22:23:24 +00005678
Auke Kok9d5c8242008-01-24 02:22:38 -08005679/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005680 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005681 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005682 * @shhwtstamps: timestamp structure to update
5683 * @regval: unsigned 64bit system time value.
5684 *
5685 * We need to convert the system time value stored in the RX/TXSTMP registers
5686 * into a hwtstamp which can be used by the upper level timestamping functions
5687 */
5688static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5689 struct skb_shared_hwtstamps *shhwtstamps,
5690 u64 regval)
5691{
5692 u64 ns;
5693
Alexander Duyck55cac242009-11-19 12:42:21 +00005694 /*
5695 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5696 * 24 to match clock shift we setup earlier.
5697 */
Alexander Duyck06218a82011-08-26 07:46:55 +00005698 if (adapter->hw.mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00005699 regval <<= IGB_82580_TSYNC_SHIFT;
5700
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005701 ns = timecounter_cyc2time(&adapter->clock, regval);
5702 timecompare_update(&adapter->compare, ns);
5703 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5704 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5705 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5706}
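
/*
 * Worked example (illustrative): IGB_82580_TSYNC_SHIFT matches the
 * 24-bit clock shift chosen at init, so a raw 82580 SYSTIM reading of
 * 1 (1 ns at bit 0) becomes 1 << 24 cycle units before it is handed
 * to timecounter_cyc2time(), keeping the two domains aligned.
 */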
5707
5708/**
5709 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5710 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck06034642011-08-26 07:44:22 +00005711 * @buffer_info: pointer to igb_tx_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005712 *
5713 * If we were asked to do hardware stamping and such a time stamp is
5714 * available, then it must have been for this skb here because we
5715 * allow only one such packet into the queue.
5716 */
Alexander Duyck06034642011-08-26 07:44:22 +00005717static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5718 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005719{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005720 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005721 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005722 struct skb_shared_hwtstamps shhwtstamps;
5723 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005724
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005725 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005726 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005727 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5728 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005729
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005730 regval = rd32(E1000_TXSTMPL);
5731 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5732
5733 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005734 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005735}
5736
5737/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005738 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005739 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005740 * returns true if ring is completely cleaned
5741 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005742static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005743{
Alexander Duyck047e0032009-10-27 15:49:27 +00005744 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005745 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005746 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005747 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005748 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005749 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005750 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005751
Alexander Duyck13fde972011-10-05 13:35:24 +00005752 if (test_bit(__IGB_DOWN, &adapter->state))
5753 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005754
Alexander Duyck06034642011-08-26 07:44:22 +00005755 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005756 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005757 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005758
Alexander Duyck13fde972011-10-05 13:35:24 +00005759 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005760 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005761
Alexander Duyck8542db02011-08-26 07:44:43 +00005762 /* prevent any other reads prior to eop_desc */
5763 rmb();
5764
5765 /* if next_to_watch is not set then there is no work pending */
5766 if (!eop_desc)
5767 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005768
5769 /* if DD is not set pending work has not been completed */
5770 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5771 break;
5772
Alexander Duyck8542db02011-08-26 07:44:43 +00005773 /* clear next_to_watch to prevent false hangs */
5774 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005775
Alexander Duyckebe42d12011-08-26 07:45:09 +00005776 /* update the statistics for this packet */
5777 total_bytes += tx_buffer->bytecount;
5778 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005779
Alexander Duyckebe42d12011-08-26 07:45:09 +00005780 /* retrieve hardware timestamp */
5781 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005782
Alexander Duyckebe42d12011-08-26 07:45:09 +00005783 /* free the skb */
5784 dev_kfree_skb_any(tx_buffer->skb);
5785 tx_buffer->skb = NULL;
5786
5787 /* unmap skb header data */
5788 dma_unmap_single(tx_ring->dev,
5789 tx_buffer->dma,
5790 tx_buffer->length,
5791 DMA_TO_DEVICE);
5792
5793 /* clear last DMA location and unmap remaining buffers */
5794 while (tx_desc != eop_desc) {
5795 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005796
Alexander Duyck13fde972011-10-05 13:35:24 +00005797 tx_buffer++;
5798 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005799 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005800 if (unlikely(!i)) {
5801 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005802 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005803 tx_desc = IGB_TX_DESC(tx_ring, 0);
5804 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005805
5806 /* unmap any remaining paged data */
5807 if (tx_buffer->dma) {
5808 dma_unmap_page(tx_ring->dev,
5809 tx_buffer->dma,
5810 tx_buffer->length,
5811 DMA_TO_DEVICE);
5812 }
5813 }
5814
5815 /* clear last DMA location */
5816 tx_buffer->dma = 0;
5817
5818 /* move us one more past the eop_desc for start of next pkt */
5819 tx_buffer++;
5820 tx_desc++;
5821 i++;
5822 if (unlikely(!i)) {
5823 i -= tx_ring->count;
5824 tx_buffer = tx_ring->tx_buffer_info;
5825 tx_desc = IGB_TX_DESC(tx_ring, 0);
5826 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005827 }
5828
Eric Dumazetbdbc0632012-01-04 20:23:36 +00005829 netdev_tx_completed_queue(txring_txq(tx_ring),
5830 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00005831 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005832 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005833 u64_stats_update_begin(&tx_ring->tx_syncp);
5834 tx_ring->tx_stats.bytes += total_bytes;
5835 tx_ring->tx_stats.packets += total_packets;
5836 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005837 q_vector->tx.total_bytes += total_bytes;
5838 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005839
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005840 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005841 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005842
Alexander Duyck8542db02011-08-26 07:44:43 +00005843 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005844
Auke Kok9d5c8242008-01-24 02:22:38 -08005845 /* Detect a transmit hang in hardware, this serializes the
5846 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005847 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyck8542db02011-08-26 07:44:43 +00005848 if (eop_desc &&
5849 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005850 (adapter->tx_timeout_factor * HZ)) &&
5851 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005852
Auke Kok9d5c8242008-01-24 02:22:38 -08005853 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005854 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005855 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005856 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005857 " TDH <%x>\n"
5858 " TDT <%x>\n"
5859 " next_to_use <%x>\n"
5860 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005861 "buffer_info[next_to_clean]\n"
5862 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005863 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005864 " jiffies <%lx>\n"
5865 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005866 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005867 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005868 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005869 tx_ring->next_to_use,
5870 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005871 tx_buffer->time_stamp,
5872 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005873 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005874 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005875 netif_stop_subqueue(tx_ring->netdev,
5876 tx_ring->queue_index);
5877
5878 /* we are about to reset, no point in enabling stuff */
5879 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005880 }
5881 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005882
5883 if (unlikely(total_packets &&
5884 netif_carrier_ok(tx_ring->netdev) &&
5885 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5886 /* Make sure that anybody stopping the queue after this
5887 * sees the new next_to_clean.
5888 */
5889 smp_mb();
5890 if (__netif_subqueue_stopped(tx_ring->netdev,
5891 tx_ring->queue_index) &&
5892 !(test_bit(__IGB_DOWN, &adapter->state))) {
5893 netif_wake_subqueue(tx_ring->netdev,
5894 tx_ring->queue_index);
5895
5896 u64_stats_update_begin(&tx_ring->tx_syncp);
5897 tx_ring->tx_stats.restart_queue++;
5898 u64_stats_update_end(&tx_ring->tx_syncp);
5899 }
5900 }
5901
5902 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005903}
5904
Alexander Duyckcd392f52011-08-26 07:43:59 +00005905static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005906 union e1000_adv_rx_desc *rx_desc,
5907 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005908{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005909 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005910
Alexander Duyck294e7d72011-08-26 07:45:57 +00005911 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005912 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00005913 return;
5914
5915 /* Rx checksum disabled via ethtool */
5916 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005917 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005918
Auke Kok9d5c8242008-01-24 02:22:38 -08005919 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005920 if (igb_test_staterr(rx_desc,
5921 E1000_RXDEXT_STATERR_TCPE |
5922 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005923 /*
5924 * work around errata with sctp packets where the TCPE aka
5925 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5926 * packets, (aka let the stack check the crc32c)
5927 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005928 if (!((skb->len == 60) &&
5929 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005930 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005931 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005932 u64_stats_update_end(&ring->rx_syncp);
5933 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005934 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005935 return;
5936 }
5937 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005938 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5939 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08005940 skb->ip_summed = CHECKSUM_UNNECESSARY;
5941
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005942 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5943 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08005944}
5945
Alexander Duyck077887c2011-08-26 07:46:29 +00005946static inline void igb_rx_hash(struct igb_ring *ring,
5947 union e1000_adv_rx_desc *rx_desc,
5948 struct sk_buff *skb)
5949{
5950 if (ring->netdev->features & NETIF_F_RXHASH)
5951 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5952}
5953
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005954static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5955 union e1000_adv_rx_desc *rx_desc,
5956 struct sk_buff *skb)
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005957{
5958 struct igb_adapter *adapter = q_vector->adapter;
5959 struct e1000_hw *hw = &adapter->hw;
5960 u64 regval;
5961
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005962 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
5963 E1000_RXDADV_STAT_TS))
5964 return;
5965
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005966 /*
5967 * If this bit is set, then the RX registers contain the time stamp. No
5968 * other packet will be time stamped until we read these registers, so
5969 * read the registers to make them available again. Because only one
5970 * packet can be time stamped at a time, we know that the register
5971 * values must belong to this one here and therefore we don't need to
5972 * compare any of the additional attributes stored for it.
5973 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005974 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005975 * can turn into a skb_shared_hwtstamps.
5976 */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005977 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
Nick Nunley757b77e2010-03-26 11:36:47 +00005978 u32 *stamp = (u32 *)skb->data;
5979 regval = le32_to_cpu(*(stamp + 2));
5980 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5981 skb_pull(skb, IGB_TS_HDR_LEN);
5982 } else {
5983 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5984 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005985
Nick Nunley757b77e2010-03-26 11:36:47 +00005986 regval = rd32(E1000_RXSTMPL);
5987 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5988 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005989
5990 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5991}
Alexander Duyck8be10e92011-08-26 07:47:11 +00005992
5993static void igb_rx_vlan(struct igb_ring *ring,
5994 union e1000_adv_rx_desc *rx_desc,
5995 struct sk_buff *skb)
5996{
5997 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5998 u16 vid;
5999 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6000 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
6001 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6002 else
6003 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6004
6005 __vlan_hwaccel_put_tag(skb, vid);
6006 }
6007}
6008
Alexander Duyck44390ca2011-08-26 07:43:38 +00006009static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006010{
6011 /* HW will not DMA in data larger than the given buffer, even if it
6012 * parses the (NFS, of course) header to be larger. In that case, it
6013 * fills the header buffer and spills the rest into the page.
6014 */
6015 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
6016 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00006017 if (hlen > IGB_RX_HDR_LEN)
6018 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006019 return hlen;
6020}
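
/*
 * Example (illustrative): a 64-byte header shows up as the value 64
 * in the HDRBUFLEN field of hdr_info; anything the hardware reports
 * above IGB_RX_HDR_LEN is clamped, since no more than the header
 * buffer size is ever DMA-ed into the linear area.
 */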
6021
Alexander Duyckcd392f52011-08-26 07:43:59 +00006022static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08006023{
Alexander Duyck0ba82992011-08-26 07:45:47 +00006024 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006025 union e1000_adv_rx_desc *rx_desc;
6026 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08006027 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006028 u16 cleaned_count = igb_desc_unused(rx_ring);
6029 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08006030
Alexander Duyck601369062011-08-26 07:44:05 +00006031 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08006032
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006033 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006034 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00006035 struct sk_buff *skb = buffer_info->skb;
6036 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006037
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006038 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006039 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006040
6041 i++;
6042 if (i == rx_ring->count)
6043 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00006044
Alexander Duyck601369062011-08-26 07:44:05 +00006045 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006046 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006047
Alexander Duyck16eb8812011-08-26 07:43:54 +00006048 /*
6049 * This memory barrier is needed to keep us from reading
6050 * any other fields out of the rx_desc until we know the
6051 * RXD_STAT_DD bit is set
6052 */
6053 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006054
Alexander Duyck16eb8812011-08-26 07:43:54 +00006055 if (!skb_is_nonlinear(skb)) {
6056 __skb_put(skb, igb_get_hlen(rx_desc));
6057 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00006058 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00006059 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00006060 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006061 }
6062
Alexander Duyck16eb8812011-08-26 07:43:54 +00006063 if (rx_desc->wb.upper.length) {
6064 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006065
Koki Sanagiaa913402010-04-27 01:01:19 +00006066 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006067 buffer_info->page,
6068 buffer_info->page_offset,
6069 length);
6070
Alexander Duyck16eb8812011-08-26 07:43:54 +00006071 skb->len += length;
6072 skb->data_len += length;
Eric Dumazet95b9c1d2011-10-13 07:56:41 +00006073 skb->truesize += PAGE_SIZE / 2;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006074
Alexander Duyckd1eff352009-11-12 18:38:35 +00006075 if ((page_count(buffer_info->page) != 1) ||
6076 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006077 buffer_info->page = NULL;
6078 else
6079 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08006080
Alexander Duyck16eb8812011-08-26 07:43:54 +00006081 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
6082 PAGE_SIZE / 2, DMA_FROM_DEVICE);
6083 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006084 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006085
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006086 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006087 struct igb_rx_buffer *next_buffer;
6088 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08006089 buffer_info->skb = next_buffer->skb;
6090 buffer_info->dma = next_buffer->dma;
6091 next_buffer->skb = skb;
6092 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006093 goto next_desc;
6094 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00006095
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006096 if (igb_test_staterr(rx_desc,
6097 E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00006098 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006099 goto next_desc;
6100 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006101
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006102 igb_rx_hwtstamp(q_vector, rx_desc, skb);
Alexander Duyck077887c2011-08-26 07:46:29 +00006103 igb_rx_hash(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006104 igb_rx_checksum(rx_ring, rx_desc, skb);
Alexander Duyck8be10e92011-08-26 07:47:11 +00006105 igb_rx_vlan(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006106
6107 total_bytes += skb->len;
6108 total_packets++;
6109
6110 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6111
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006112 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006113
Alexander Duyck16eb8812011-08-26 07:43:54 +00006114 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08006115next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00006116 if (!budget)
6117 break;
6118
6119 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006120 /* return some buffers to hardware, one at a time is too slow */
6121 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00006122 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006123 cleaned_count = 0;
6124 }
6125
6126 /* use prefetched values */
6127 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006128 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006129
Auke Kok9d5c8242008-01-24 02:22:38 -08006130 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006131 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006132 rx_ring->rx_stats.packets += total_packets;
6133 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006134 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006135 q_vector->rx.total_packets += total_packets;
6136 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006137
6138 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006139 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006140
Alexander Duyck16eb8812011-08-26 07:43:54 +00006141 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006142}
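
/*
 * Design note (summary of the loop above): receives are header-split.
 * Up to IGB_RX_HDR_LEN bytes land in the skb linear area, any
 * remainder arrives in half-page fragments, and packets that span
 * descriptors are stitched back together via the EOP check.
 */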
6143
Alexander Duyckc023cd82011-08-26 07:43:43 +00006144static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006145 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006146{
6147 struct sk_buff *skb = bi->skb;
6148 dma_addr_t dma = bi->dma;
6149
6150 if (dma)
6151 return true;
6152
6153 if (likely(!skb)) {
6154 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6155 IGB_RX_HDR_LEN);
6156 bi->skb = skb;
6157 if (!skb) {
6158 rx_ring->rx_stats.alloc_failed++;
6159 return false;
6160 }
6161
6162 /* initialize skb for ring */
6163 skb_record_rx_queue(skb, rx_ring->queue_index);
6164 }
6165
6166 dma = dma_map_single(rx_ring->dev, skb->data,
6167 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6168
6169 if (dma_mapping_error(rx_ring->dev, dma)) {
6170 rx_ring->rx_stats.alloc_failed++;
6171 return false;
6172 }
6173
6174 bi->dma = dma;
6175 return true;
6176}
6177
6178static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006179 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006180{
6181 struct page *page = bi->page;
6182 dma_addr_t page_dma = bi->page_dma;
6183 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6184
6185 if (page_dma)
6186 return true;
6187
6188 if (!page) {
Eric Dumazet1f2149c2011-11-22 10:57:41 +00006189 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006190 bi->page = page;
6191 if (unlikely(!page)) {
6192 rx_ring->rx_stats.alloc_failed++;
6193 return false;
6194 }
6195 }
6196
6197 page_dma = dma_map_page(rx_ring->dev, page,
6198 page_offset, PAGE_SIZE / 2,
6199 DMA_FROM_DEVICE);
6200
6201 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6202 rx_ring->rx_stats.alloc_failed++;
6203 return false;
6204 }
6205
6206 bi->page_dma = page_dma;
6207 bi->page_offset = page_offset;
6208 return true;
6209}
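
/*
 * Design note: each page backs two half-page receive buffers; the
 * XOR with PAGE_SIZE / 2 above flips between the halves, and the
 * page_count()/page_to_nid() tests in the clean path decide whether
 * a half can be recycled instead of freshly allocated.
 */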
6210
Auke Kok9d5c8242008-01-24 02:22:38 -08006211/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006212 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08006213 * @rx_ring: pointer to the ring to place buffers on
 * @cleaned_count: number of buffers to replace
6214 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006215void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006216{
Auke Kok9d5c8242008-01-24 02:22:38 -08006217 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006218 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006219 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006220
Alexander Duyck601369062011-08-26 07:44:05 +00006221 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006222 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006223 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006224
6225 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006226 if (!igb_alloc_mapped_skb(rx_ring, bi))
6227 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006228
Alexander Duyckc023cd82011-08-26 07:43:43 +00006229 /* Refresh the desc even if buffer_addrs didn't change
6230 * because each write-back erases this info. */
6231 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006232
Alexander Duyckc023cd82011-08-26 07:43:43 +00006233 if (!igb_alloc_mapped_page(rx_ring, bi))
6234 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006235
Alexander Duyckc023cd82011-08-26 07:43:43 +00006236 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006237
Alexander Duyckc023cd82011-08-26 07:43:43 +00006238 rx_desc++;
6239 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006240 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006241 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006242 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006243 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006244 i -= rx_ring->count;
6245 }
6246
6247 /* clear the hdr_addr for the next_to_use descriptor */
6248 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006249 }
6250
Alexander Duyckc023cd82011-08-26 07:43:43 +00006251 i += rx_ring->count;
6252
Auke Kok9d5c8242008-01-24 02:22:38 -08006253 if (rx_ring->next_to_use != i) {
6254 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006255
6256 /* Force memory writes to complete before letting h/w
6257 * know there are new descriptors to fetch. (Only
6258 * applicable for weak-ordered memory model archs,
6259 * such as IA-64). */
6260 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006261 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006262 }
6263}
6264
6265/**
6266 * igb_mii_ioctl - handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG)
6267 * @netdev: network interface device structure
6268 * @ifr: interface request structure carrying the MII data
6269 * @cmd: ioctl command
6270 **/
6271static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6272{
6273 struct igb_adapter *adapter = netdev_priv(netdev);
6274 struct mii_ioctl_data *data = if_mii(ifr);
6275
6276 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6277 return -EOPNOTSUPP;
6278
6279 switch (cmd) {
6280 case SIOCGMIIPHY:
6281 data->phy_id = adapter->hw.phy.addr;
6282 break;
6283 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006284 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6285 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006286 return -EIO;
6287 break;
6288 case SIOCSMIIREG:
6289 default:
6290 return -EOPNOTSUPP;
6291 }
6292 return 0;
6293}
6294
6295/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006296 * igb_hwtstamp_ioctl - control hardware time stamping
6297 * @netdev: network interface device structure
6298 * @ifr: interface request structure with the hwtstamp_config payload
6299 * @cmd: ioctl command (SIOCSHWTSTAMP)
6300 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006301 * Outgoing time stamping can be enabled and disabled. Play nice and
6302 * disable it when requested, although it shouldn't cause any overhead
6303 * when no packet needs it. At most one packet in the queue may be
6304 * marked for time stamping, otherwise it would be impossible to tell
6305 * for sure to which packet the hardware time stamp belongs.
6306 *
6307 * Incoming time stamping has to be configured via the hardware
6308 * filters. Not all combinations are supported, in particular event
6309 * type has to be specified. Matching the kind of event packet is
6310 * not supported, with the exception of "all V2 events regardless of
6311 * level 2 or 4".
6312 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006313 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
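		/* fall through - OFF clears the enable bit, then breaks */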
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl || tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588)); /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
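
/*
 * Example: a minimal userspace sketch of requesting hardware time
 * stamping through SIOCSHWTSTAMP, which lands in the handler above.
 * "eth0" is an illustrative interface name; error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *
 *	struct hwtstamp_config cfg;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter holds the filter actually applied, which may
 * be broader than the one requested (see the fallbacks above).
 */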

/**
 * igb_ioctl - handle device specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
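
/*
 * Example: this path is typically reached from the ethtool set_settings
 * hook, e.g.
 *
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * which ends up here roughly as igb_set_spd_dplx(adapter, SPEED_100,
 * DUPLEX_FULL). The "spd + dplx" switch works because the SPEED_* values
 * are even and DUPLEX_FULL is 1, so every supported combination maps to
 * a unique sum. (The command line shown is illustrative.)
 */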

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = __igb_shutdown(pdev, &wake, false);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (!rtnl_is_locked()) {
		/*
		 * shut up ASSERT_RTNL() warning in
		 * netif_set_real_num_tx/rx_queues.
		 */
		rtnl_lock();
		err = igb_init_interrupt_scheme(adapter);
		rtnl_unlock();
	} else {
		err = igb_init_interrupt_scheme(adapter);
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netdev->flags & IFF_UP) {
		err = __igb_open(netdev, true);
		if (err)
			return err;
	}

	netif_device_attach(netdev);
	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

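	/* never allow an immediate runtime suspend: if the link is down,
	 * schedule a suspend attempt five seconds out and report busy
	 * for now.
	 */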
	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake, true);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->msix_entries)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
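
/*
 * Worked example of the packing above: for MAC 00:1b:21:aa:bb:cc,
 * addr[0..5] = {0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc}, so
 *
 *	rar_low  = 0x00 | (0x1b << 8) | (0x21 << 16) | (0xaa << 24)
 *	         = 0xaa211b00
 *	rar_high = 0xbb | (0xcc << 8) = 0x0000ccbb   (before AV/pool bits)
 *
 * i.e. the first four octets land in RAL and the last two in RAH, byte
 * reversed relative to their on-wire order. (Address is illustrative.)
 */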

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address
	 * registers and move towards the first, so a collision should
	 * not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}
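
/*
 * Worked example of the rate-factor math above, assuming a 1000 Mbps
 * link, a 300 Mbps VF limit and E1000_RTTBCNRC_RF_INT_SHIFT == 14 (the
 * speeds are illustrative; the shift follows the register layout):
 *
 *	rf_int = 1000 / 300              = 3
 *	rf_dec = 1000 - 3 * 300          = 100
 *	rf_dec = (100 * (1 << 14)) / 300 = 5461
 *
 * so the hardware rate factor is 3 + 5461/16384, roughly 3.333, i.e. the
 * link speed divided by the requested rate in fixed point.
 */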

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}
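
/*
 * Example: this ndo is typically exercised from iproute2, e.g.
 *
 *	ip link set eth0 vf 0 rate 300
 *
 * which requests a 300 Mbps transmit cap on VF 0 (the interface name and
 * numbers are illustrative).
 */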

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
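		/* fall through - 82576 also needs the 82580 setup below */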
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
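		/* fall through - the i350 case only breaks */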
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/*
			 * DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/*
			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = +/-1000 usec, in 32 usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/*
			 * no lower threshold to disable
			 * coalescing (smart FIFO) - UTRESH = 0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/*
			 * free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/*
			 * make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->flags & IGB_FLAG_DMAC */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}
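
/*
 * Worked example of the watermark math above, assuming pba = 34 (KB)
 * and max_frame_size = 1522 bytes; both numbers are illustrative:
 *
 *	hwm      = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081  (16-byte units)
 *	floor    = 64 * (34 - 6)       = 1792              (no adjustment)
 *	dmac_thr = 34 - 1522 / 512     = 34 - 2    = 32    (KB)
 *	floor    = 34 - 10             = 24                (no adjustment)
 */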

/* igb_main.c */