/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/init.h>
Jiri Pirkob2cb09b2011-07-21 03:27:27 +000031#include <linux/bitops.h>
Auke Kok9d5c8242008-01-24 02:22:38 -080032#include <linux/vmalloc.h>
33#include <linux/pagemap.h>
34#include <linux/netdevice.h>
Auke Kok9d5c8242008-01-24 02:22:38 -080035#include <linux/ipv6.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090036#include <linux/slab.h>
Auke Kok9d5c8242008-01-24 02:22:38 -080037#include <net/checksum.h>
38#include <net/ip6_checksum.h>
Patrick Ohlyc6cb0902009-02-12 05:03:42 +000039#include <linux/net_tstamp.h>
Auke Kok9d5c8242008-01-24 02:22:38 -080040#include <linux/mii.h>
41#include <linux/ethtool.h>
Jiri Pirko01789342011-08-16 06:29:00 +000042#include <linux/if.h>
Auke Kok9d5c8242008-01-24 02:22:38 -080043#include <linux/if_vlan.h>
44#include <linux/pci.h>
Alexander Duyckc54106b2008-10-16 21:26:57 -070045#include <linux/pci-aspm.h>
Auke Kok9d5c8242008-01-24 02:22:38 -080046#include <linux/delay.h>
47#include <linux/interrupt.h>
Alexander Duyck7d13a7d2011-08-26 07:44:32 +000048#include <linux/ip.h>
49#include <linux/tcp.h>
50#include <linux/sctp.h>
Auke Kok9d5c8242008-01-24 02:22:38 -080051#include <linux/if_ether.h>
Alexander Duyck40a914f2008-11-27 00:24:37 -080052#include <linux/aer.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040053#include <linux/prefetch.h>
Jeff Kirsher421e02f2008-10-17 11:08:31 -070054#ifdef CONFIG_IGB_DCA
Jeb Cramerfe4506b2008-07-08 15:07:55 -070055#include <linux/dca.h>
56#endif
Auke Kok9d5c8242008-01-24 02:22:38 -080057#include "igb.h"
58
#define MAJ 3
#define MIN 2
#define BUILD 10
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
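/*
 * For reference, with the MAJ/MIN/BUILD values above the stringify chain
 * expands DRV_VERSION to the version string "3.2.10-k".
 */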
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
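/*
 * Example usage, assuming the module is built with CONFIG_PCI_IOV:
 *
 *	modprobe igb max_vfs=7
 *
 * asks the driver to allocate up to 7 virtual functions per physical
 * function at load time.
 */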

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
302 case E1000_TDBAL(0):
303 for (n = 0; n < 4; n++)
304 regs[n] = rd32(E1000_RDBAL(n));
305 break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
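
/*
 * For the per-queue registers handled above, igb_regdump() emits one line
 * covering all four instances, e.g. (values illustrative):
 *
 *	RDLEN[0-3]      00001000 00001000 00001000 00001000
 *
 * Single-instance registers fall through to the default case and print a
 * plain "NAME value" pair.
 */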

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type >= e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
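
/*
 * igb_read_clock() is consumed as the ->read hook of the adapter's
 * cyclecounter. A minimal sketch of that wiring, assuming the
 * cyclecounter/timecounter fields declared in igb.h (field values are
 * illustrative; the actual setup is done in igb_init_hw_timer()):
 *
 *	adapter->cycles.read = igb_read_clock;
 *	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
 *	timecounter_init(&adapter->clock, &adapter->cycles,
 *			 ktime_to_ns(ktime_get_real()));
 */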
581
Auke Kok9d5c8242008-01-24 02:22:38 -0800582/**
Alexander Duyckc0410762010-03-25 13:10:08 +0000583 * igb_get_hw_dev - return device
Auke Kok9d5c8242008-01-24 02:22:38 -0800584 * used by hardware layer to print debugging information
585 **/
Alexander Duyckc0410762010-03-25 13:10:08 +0000586struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
Auke Kok9d5c8242008-01-24 02:22:38 -0800587{
588 struct igb_adapter *adapter = hw->back;
Alexander Duyckc0410762010-03-25 13:10:08 +0000589 return adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -0800590}
Patrick Ohly38c845c2009-02-12 05:03:41 +0000591
592/**
Auke Kok9d5c8242008-01-24 02:22:38 -0800593 * igb_init_module - Driver Registration Routine
594 *
595 * igb_init_module is the first routine called when the driver is
596 * loaded. All it does is register with the PCI subsystem.
597 **/
598static int __init igb_init_module(void)
599{
600 int ret;
601 printk(KERN_INFO "%s - version %s\n",
602 igb_driver_string, igb_driver_version);
603
604 printk(KERN_INFO "%s\n", igb_copyright);
605
Jeff Kirsher421e02f2008-10-17 11:08:31 -0700606#ifdef CONFIG_IGB_DCA
Jeb Cramerfe4506b2008-07-08 15:07:55 -0700607 dca_register_notify(&dca_notifier);
608#endif
Alexander Duyckbbd98fe2009-01-31 00:52:30 -0800609 ret = pci_register_driver(&igb_driver);
Auke Kok9d5c8242008-01-24 02:22:38 -0800610 return ret;
611}
612
613module_init(igb_init_module);
614
615/**
616 * igb_exit_module - Driver Exit Cleanup Routine
617 *
618 * igb_exit_module is called just before the driver is removed
619 * from memory.
620 **/
621static void __exit igb_exit_module(void)
622{
Jeff Kirsher421e02f2008-10-17 11:08:31 -0700623#ifdef CONFIG_IGB_DCA
Jeb Cramerfe4506b2008-07-08 15:07:55 -0700624 dca_unregister_notify(&dca_notifier);
625#endif
Auke Kok9d5c8242008-01-24 02:22:38 -0800626 pci_unregister_driver(&igb_driver);
627}
628
629module_exit(igb_exit_module);
630
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
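/*
 * Worked example of the interleave: Q_IDX_82576(0..3) yields 0, 8, 1, 9,
 * so ring i lands in the same 0, 8, 1, 9, ... sequence the VFs consume
 * (VF 0 owns queues 0 and 8, VF 1 owns 1 and 9, and so on).
 */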
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
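
/*
 * With the naming scheme above, the per-vector IRQs show up in
 * /proc/interrupts as, e.g., "eth0-TxRx-0" for a paired vector, or
 * "eth0-tx-0"/"eth0-rx-0" when Tx and Rx use separate vectors
 * (interface name illustrative).
 */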

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}
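
/*
 * Vector budget example (values illustrative): with rss_queues = 4 and no
 * VFs, num_rx_queues = num_tx_queues = 4.  With IGB_FLAG_QUEUE_PAIRS set
 * this requests numvecs = 4 + 1 = 5 MSI-X vectors (one per paired Rx/Tx
 * queue plus one for link status); without pairing it requests
 * 4 + 4 + 1 = 9.
 */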

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
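
/*
 * Mapping example (values illustrative): with 4 Rx and 4 Tx queues and a
 * full 8 q_vectors, each ring gets its own vector.  If the MSI-X budget
 * left only 4 q_vectors, the else branch above pairs Tx ring i and Rx
 * ring i onto vector i, matching the IGB_FLAG_QUEUE_PAIRS budget computed
 * in igb_set_interrupt_capability().
 */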

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}


	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
1257
1258/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001259 * igb_request_irq - initialize interrupts
1260 *
1261 * Attempts to configure interrupts using the best available
1262 * capabilities of the hardware and kernel.
1263 **/
1264static int igb_request_irq(struct igb_adapter *adapter)
1265{
1266 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001267 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001268 int err = 0;
1269
1270 if (adapter->msix_entries) {
1271 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001272 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001273 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001274 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001275 igb_clear_interrupt_scheme(adapter);
Alexander Duyckc74d5882011-08-26 07:46:45 +00001276 if (!pci_enable_msi(pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001277 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001278 igb_free_all_tx_resources(adapter);
1279 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001280 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001281 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001282 adapter->num_q_vectors = 1;
1283 err = igb_alloc_q_vectors(adapter);
1284 if (err) {
1285 dev_err(&pdev->dev,
1286 "Unable to allocate memory for vectors\n");
1287 goto request_done;
1288 }
1289 err = igb_alloc_queues(adapter);
1290 if (err) {
1291 dev_err(&pdev->dev,
1292 "Unable to allocate memory for queues\n");
1293 igb_free_q_vectors(adapter);
1294 goto request_done;
1295 }
1296 igb_setup_all_tx_resources(adapter);
1297 igb_setup_all_rx_resources(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001298 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001299
Alexander Duyckc74d5882011-08-26 07:46:45 +00001300 igb_assign_vector(adapter->q_vector[0], 0);
1301
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001302 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001303 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001304 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001305 if (!err)
1306 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001307
Auke Kok9d5c8242008-01-24 02:22:38 -08001308 /* fall back to legacy interrupts */
1309 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001310 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001311 }
1312
Alexander Duyckc74d5882011-08-26 07:46:45 +00001313 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001314 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001315
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001316 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001317 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001318 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001319
1320request_done:
1321 return err;
1322}
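/*
 * Summary of the fallback ladder above (descriptive note, not code from
 * the original driver): MSI-X with one vector per q_vector is tried
 * first; on failure the interrupt scheme is torn down, the device is
 * collapsed to a single Tx/Rx queue pair with one q_vector, and MSI is
 * tried via igb_intr_msi(); if that also fails, the driver falls back
 * to a shared legacy INTx line serviced by igb_intr().
 */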
1323
1324static void igb_free_irq(struct igb_adapter *adapter)
1325{
Auke Kok9d5c8242008-01-24 02:22:38 -08001326 if (adapter->msix_entries) {
1327 int vector = 0, i;
1328
Alexander Duyck047e0032009-10-27 15:49:27 +00001329 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001330
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001331 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001332 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001333 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001334 } else {
1335 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001336 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001337}
1338
1339/**
1340 * igb_irq_disable - Mask off interrupt generation on the NIC
1341 * @adapter: board private structure
1342 **/
1343static void igb_irq_disable(struct igb_adapter *adapter)
1344{
1345 struct e1000_hw *hw = &adapter->hw;
1346
Alexander Duyck25568a52009-10-27 23:49:59 +00001347	/*
1348	 * We need to be careful when disabling interrupts. The VFs are also
1349	 * mapped into these registers, and clearing the bits can cause
1350	 * issues for the VF drivers, so we clear only the bits we set.
1351	 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001352 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001353 u32 regval = rd32(E1000_EIAM);
1354 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1355 wr32(E1000_EIMC, adapter->eims_enable_mask);
1356 regval = rd32(E1000_EIAC);
1357 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001358 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001359
1360 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001361 wr32(E1000_IMC, ~0);
1362 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001363 if (adapter->msix_entries) {
1364 int i;
1365 for (i = 0; i < adapter->num_q_vectors; i++)
1366 synchronize_irq(adapter->msix_entries[i].vector);
1367 } else {
1368 synchronize_irq(adapter->pdev->irq);
1369 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001370}
1371
1372/**
1373 * igb_irq_enable - Enable default interrupt generation settings
1374 * @adapter: board private structure
1375 **/
1376static void igb_irq_enable(struct igb_adapter *adapter)
1377{
1378 struct e1000_hw *hw = &adapter->hw;
1379
1380 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001381 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001382 u32 regval = rd32(E1000_EIAC);
1383 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1384 regval = rd32(E1000_EIAM);
1385 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001386 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001387 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001388 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001389 ims |= E1000_IMS_VMMB;
1390 }
1391 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001392 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001393 wr32(E1000_IMS, IMS_ENABLE_MASK |
1394 E1000_IMS_DRSTA);
1395 wr32(E1000_IAM, IMS_ENABLE_MASK |
1396 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001397 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001398}
1399
1400static void igb_update_mng_vlan(struct igb_adapter *adapter)
1401{
Alexander Duyck51466232009-10-27 23:47:35 +00001402 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001403 u16 vid = adapter->hw.mng_cookie.vlan_id;
1404 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001405
Alexander Duyck51466232009-10-27 23:47:35 +00001406 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1407 /* add VID to filter table */
1408 igb_vfta_set(hw, vid, true);
1409 adapter->mng_vlan_id = vid;
1410 } else {
1411 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1412 }
1413
1414 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1415 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001416 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001417 /* remove VID from filter table */
1418 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001419 }
1420}
1421
1422/**
1423 * igb_release_hw_control - release control of the h/w to f/w
1424 * @adapter: address of board private structure
1425 *
1426 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1427 * For ASF and Pass Through versions of f/w this means that the
1428 * driver is no longer loaded.
1429 *
1430 **/
1431static void igb_release_hw_control(struct igb_adapter *adapter)
1432{
1433 struct e1000_hw *hw = &adapter->hw;
1434 u32 ctrl_ext;
1435
1436 /* Let firmware take over control of h/w */
1437 ctrl_ext = rd32(E1000_CTRL_EXT);
1438 wr32(E1000_CTRL_EXT,
1439 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1440}
1441
Auke Kok9d5c8242008-01-24 02:22:38 -08001442/**
1443 * igb_get_hw_control - get control of the h/w from f/w
1444 * @adapter: address of board private structure
1445 *
1446 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1447 * For ASF and Pass Through versions of f/w this means that
1448 * the driver is loaded.
1449 *
1450 **/
1451static void igb_get_hw_control(struct igb_adapter *adapter)
1452{
1453 struct e1000_hw *hw = &adapter->hw;
1454 u32 ctrl_ext;
1455
1456 /* Let firmware know the driver has taken over */
1457 ctrl_ext = rd32(E1000_CTRL_EXT);
1458 wr32(E1000_CTRL_EXT,
1459 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1460}
1461
Auke Kok9d5c8242008-01-24 02:22:38 -08001462/**
1463 * igb_configure - configure the hardware for RX and TX
1464 * @adapter: private board structure
1465 **/
1466static void igb_configure(struct igb_adapter *adapter)
1467{
1468 struct net_device *netdev = adapter->netdev;
1469 int i;
1470
1471 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001472 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001473
1474 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001475
Alexander Duyck85b430b2009-10-27 15:50:29 +00001476 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001477 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001478 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001479
1480 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001481 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001482
1483 igb_rx_fifo_flush_82575(&adapter->hw);
1484
Alexander Duyckc493ea42009-03-20 00:16:50 +00001485 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001486 * at least 1 descriptor unused to make sure
1487 * next_to_use != next_to_clean */
1488 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001489 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001490 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001491 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001492}
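/*
 * Illustrative note on igb_desc_unused() above: keeping one descriptor
 * unused is the classic ring-buffer trick for telling "full" apart from
 * "empty". With a 256-entry ring and next_to_clean == 0, at most 255
 * buffers are posted, so next_to_use == next_to_clean can only mean the
 * ring is empty.
 */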
1493
Nick Nunley88a268c2010-02-17 01:01:59 +00001494/**
1495 * igb_power_up_link - Power up the phy/serdes link
1496 * @adapter: address of board private structure
1497 **/
1498void igb_power_up_link(struct igb_adapter *adapter)
1499{
1500 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1501 igb_power_up_phy_copper(&adapter->hw);
1502 else
1503 igb_power_up_serdes_link_82575(&adapter->hw);
1504}
1505
1506/**
1507 * igb_power_down_link - Power down the phy/serdes link
1508 * @adapter: address of board private structure
1509 */
1510static void igb_power_down_link(struct igb_adapter *adapter)
1511{
1512 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1513 igb_power_down_phy_copper_82575(&adapter->hw);
1514 else
1515 igb_shutdown_serdes_link_82575(&adapter->hw);
1516}
Auke Kok9d5c8242008-01-24 02:22:38 -08001517
1518/**
1519 * igb_up - Open the interface and prepare it to handle traffic
1520 * @adapter: board private structure
1521 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001522int igb_up(struct igb_adapter *adapter)
1523{
1524 struct e1000_hw *hw = &adapter->hw;
1525 int i;
1526
1527 /* hardware has been reset, we need to reload some things */
1528 igb_configure(adapter);
1529
1530 clear_bit(__IGB_DOWN, &adapter->state);
1531
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001532 for (i = 0; i < adapter->num_q_vectors; i++)
1533 napi_enable(&(adapter->q_vector[i]->napi));
1534
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001535 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001536 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001537 else
1538 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001539
1540 /* Clear any pending interrupts. */
1541 rd32(E1000_ICR);
1542 igb_irq_enable(adapter);
1543
Alexander Duyckd4960302009-10-27 15:53:45 +00001544 /* notify VFs that reset has been completed */
1545 if (adapter->vfs_allocated_count) {
1546 u32 reg_data = rd32(E1000_CTRL_EXT);
1547 reg_data |= E1000_CTRL_EXT_PFRSTD;
1548 wr32(E1000_CTRL_EXT, reg_data);
1549 }
1550
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001551 netif_tx_start_all_queues(adapter->netdev);
1552
Alexander Duyck25568a52009-10-27 23:49:59 +00001553 /* start the watchdog. */
1554 hw->mac.get_link_status = 1;
1555 schedule_work(&adapter->watchdog_task);
1556
Auke Kok9d5c8242008-01-24 02:22:38 -08001557 return 0;
1558}
1559
1560void igb_down(struct igb_adapter *adapter)
1561{
Auke Kok9d5c8242008-01-24 02:22:38 -08001562 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001563 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001564 u32 tctl, rctl;
1565 int i;
1566
1567 /* signal that we're down so the interrupt handler does not
1568 * reschedule our watchdog timer */
1569 set_bit(__IGB_DOWN, &adapter->state);
1570
1571 /* disable receives in the hardware */
1572 rctl = rd32(E1000_RCTL);
1573 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1574 /* flush and sleep below */
1575
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001576 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001577
1578 /* disable transmits in the hardware */
1579 tctl = rd32(E1000_TCTL);
1580 tctl &= ~E1000_TCTL_EN;
1581 wr32(E1000_TCTL, tctl);
1582 /* flush both disables and wait for them to finish */
1583 wrfl();
1584 msleep(10);
1585
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001586 for (i = 0; i < adapter->num_q_vectors; i++)
1587 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001588
Auke Kok9d5c8242008-01-24 02:22:38 -08001589 igb_irq_disable(adapter);
1590
1591 del_timer_sync(&adapter->watchdog_timer);
1592 del_timer_sync(&adapter->phy_info_timer);
1593
Auke Kok9d5c8242008-01-24 02:22:38 -08001594 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001595
1596	/* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001597 spin_lock(&adapter->stats64_lock);
1598 igb_update_stats(adapter, &adapter->stats64);
1599 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001600
Auke Kok9d5c8242008-01-24 02:22:38 -08001601 adapter->link_speed = 0;
1602 adapter->link_duplex = 0;
1603
Jeff Kirsher30236822008-06-24 17:01:15 -07001604 if (!pci_channel_offline(adapter->pdev))
1605 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001606 igb_clean_all_tx_rings(adapter);
1607 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001608#ifdef CONFIG_IGB_DCA
1609
1610 /* since we reset the hardware DCA settings were cleared */
1611 igb_setup_dca(adapter);
1612#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001613}
1614
1615void igb_reinit_locked(struct igb_adapter *adapter)
1616{
1617 WARN_ON(in_interrupt());
1618 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1619 msleep(1);
1620 igb_down(adapter);
1621 igb_up(adapter);
1622 clear_bit(__IGB_RESETTING, &adapter->state);
1623}
1624
1625void igb_reset(struct igb_adapter *adapter)
1626{
Alexander Duyck090b1792009-10-27 23:51:55 +00001627 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001628 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001629 struct e1000_mac_info *mac = &hw->mac;
1630 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001631 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1632 u16 hwm;
1633
1634	/* Repartition the PBA for MTUs greater than 9k.
1635	 * CTRL.RST is required for the change to take effect.
1636	 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001637 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001638 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001639 case e1000_82580:
1640 pba = rd32(E1000_RXPBS);
1641 pba = igb_rxpbs_adjust_82580(pba);
1642 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001643 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001644 pba = rd32(E1000_RXPBS);
1645 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001646 break;
1647 case e1000_82575:
1648 default:
1649 pba = E1000_PBA_34K;
1650 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001651 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001652
Alexander Duyck2d064c02008-07-08 15:10:12 -07001653 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1654 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001655 /* adjust PBA for jumbo frames */
1656 wr32(E1000_PBA, pba);
1657
1658 /* To maintain wire speed transmits, the Tx FIFO should be
1659 * large enough to accommodate two full transmit packets,
1660 * rounded up to the next 1KB and expressed in KB. Likewise,
1661 * the Rx FIFO should be large enough to accommodate at least
1662 * one full receive packet and is similarly rounded up and
1663 * expressed in KB. */
1664 pba = rd32(E1000_PBA);
1665 /* upper 16 bits has Tx packet buffer allocation size in KB */
1666 tx_space = pba >> 16;
1667 /* lower 16 bits has Rx packet buffer allocation size in KB */
1668 pba &= 0xffff;
1669	 /* the Tx FIFO also stores 16 bytes of information about each
1670	 * packet, but not the Ethernet FCS, because hardware appends it */
1671 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001672 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001673 ETH_FCS_LEN) * 2;
1674 min_tx_space = ALIGN(min_tx_space, 1024);
1675 min_tx_space >>= 10;
1676 /* software strips receive CRC, so leave room for it */
1677 min_rx_space = adapter->max_frame_size;
1678 min_rx_space = ALIGN(min_rx_space, 1024);
1679 min_rx_space >>= 10;
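		/*
		 * Worked example (illustrative numbers only): a 9000-byte
		 * MTU gives a max_frame_size of 9022, so min_tx_space =
		 * (9022 + 16 - 4) * 2 = 18068, rounded up to 18432 and
		 * expressed as 18 KB; min_rx_space = 9022 rounded up to
		 * 9216, i.e. 9 KB.
		 */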
1680
1681 /* If current Tx allocation is less than the min Tx FIFO size,
1682 * and the min Tx FIFO size is less than the current Rx FIFO
1683 * allocation, take space away from current Rx allocation */
1684 if (tx_space < min_tx_space &&
1685 ((min_tx_space - tx_space) < pba)) {
1686 pba = pba - (min_tx_space - tx_space);
1687
1688 /* if short on rx space, rx wins and must trump tx
1689 * adjustment */
1690 if (pba < min_rx_space)
1691 pba = min_rx_space;
1692 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001693 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001694 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001695
1696 /* flow control settings */
1697 /* The high water mark must be low enough to fit one full frame
1698 * (or the size used for early receive) above it in the Rx FIFO.
1699 * Set it to the lower of:
1700 * - 90% of the Rx FIFO size, or
1701 * - the full Rx FIFO size minus one full frame */
1702 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001703 ((pba << 10) - 2 * adapter->max_frame_size));
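	/*
	 * Worked example (illustrative numbers only): with pba = 34 KB and
	 * a 9022-byte max frame, hwm = min(34816 * 9 / 10, 34816 - 2 * 9022)
	 * = min(31334, 16772) = 16772; the & 0xFFF0 below then rounds the
	 * high water mark down to 16768 bytes.
	 */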
Auke Kok9d5c8242008-01-24 02:22:38 -08001704
Alexander Duyckd405ea32009-12-23 13:21:27 +00001705 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1706 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001707 fc->pause_time = 0xFFFF;
1708 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001709 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001710
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001711 /* disable receive for all VFs and wait one second */
1712 if (adapter->vfs_allocated_count) {
1713 int i;
1714 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001715 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001716
1717 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001718 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001719
1720 /* disable transmits and receives */
1721 wr32(E1000_VFRE, 0);
1722 wr32(E1000_VFTE, 0);
1723 }
1724
Auke Kok9d5c8242008-01-24 02:22:38 -08001725 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001726 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001727 wr32(E1000_WUC, 0);
1728
Alexander Duyck330a6d62009-10-27 23:51:35 +00001729 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001730 dev_err(&pdev->dev, "Hardware Error\n");
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08001731 if (hw->mac.type > e1000_82580) {
1732 if (adapter->flags & IGB_FLAG_DMAC) {
1733 u32 reg;
Auke Kok9d5c8242008-01-24 02:22:38 -08001734
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08001735			/*
1736			 * The DMA Coalescing high water mark needs to be higher
1737			 * than the Rx threshold. The Rx threshold is currently
1738			 * pba - 6, so we should use a high water mark of
1739			 * pba - 4. */
1740 hwm = (pba - 4) << 10;
1741
1742 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1743 & E1000_DMACR_DMACTHR_MASK);
1744
1745			/* transition to L0s or L1 if available */
1746 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1747
1748 /* watchdog timer= +-1000 usec in 32usec intervals */
1749 reg |= (1000 >> 5);
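			/* (1000 >> 5) = 31 intervals of 32 usec each,
			 * i.e. a watchdog of roughly 992 usec */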
1750 wr32(E1000_DMACR, reg);
1751
1752			/* no lower threshold to disable coalescing (smart FIFO)
1753			 * - UTRESH=0 */
1754 wr32(E1000_DMCRTRH, 0);
1755
1756 /* set hwm to PBA - 2 * max frame size */
1757 wr32(E1000_FCRTC, hwm);
1758
1759			/*
1760			 * This sets the time to wait before requesting a transition
1761			 * to a low power state to the number of usecs needed to
1762			 * receive one 512-byte frame at gigabit line rate
1763			 */
1764 reg = rd32(E1000_DMCTLX);
1765 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1766
1767 /* Delay 255 usec before entering Lx state. */
1768 reg |= 0xFF;
1769 wr32(E1000_DMCTLX, reg);
1770
1771 /* free space in Tx packet buffer to wake from DMAC */
1772 wr32(E1000_DMCTXTH,
1773 (IGB_MIN_TXPBSIZE -
1774 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1775 >> 6);
1776
1777 /* make low power state decision controlled by DMAC */
1778 reg = rd32(E1000_PCIEMISC);
1779 reg |= E1000_PCIEMISC_LX_DECISION;
1780 wr32(E1000_PCIEMISC, reg);
1781 } /* end if IGB_FLAG_DMAC set */
1782 }
Alexander Duyck55cac242009-11-19 12:42:21 +00001783 if (hw->mac.type == e1000_82580) {
1784 u32 reg = rd32(E1000_PCIEMISC);
1785 wr32(E1000_PCIEMISC,
1786 reg & ~E1000_PCIEMISC_LX_DECISION);
1787 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001788 if (!netif_running(adapter->netdev))
1789 igb_power_down_link(adapter);
1790
Auke Kok9d5c8242008-01-24 02:22:38 -08001791 igb_update_mng_vlan(adapter);
1792
1793 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1794 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1795
Alexander Duyck330a6d62009-10-27 23:51:35 +00001796 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001797}
1798
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001799static u32 igb_fix_features(struct net_device *netdev, u32 features)
1800{
1801 /*
1802	 * Since there is no support for separate Rx/Tx vlan accel
1803	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
1804 */
1805 if (features & NETIF_F_HW_VLAN_RX)
1806 features |= NETIF_F_HW_VLAN_TX;
1807 else
1808 features &= ~NETIF_F_HW_VLAN_TX;
1809
1810 return features;
1811}
1812
Michał Mirosławac52caa2011-06-08 08:38:01 +00001813static int igb_set_features(struct net_device *netdev, u32 features)
1814{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001815 u32 changed = netdev->features ^ features;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001816
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001817 if (changed & NETIF_F_HW_VLAN_RX)
1818 igb_vlan_mode(netdev, features);
1819
Michał Mirosławac52caa2011-06-08 08:38:01 +00001820 return 0;
1821}
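/*
 * Example (assumed ethtool usage, for illustration only): because Rx and
 * Tx VLAN acceleration cannot be toggled separately, a request such as
 *
 *   ethtool -K eth0 rxvlan off
 *
 * is adjusted by igb_fix_features() so that txvlan follows rxvlan, and
 * igb_set_features() only reprograms the hardware when the Rx flag
 * actually changed.
 */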
1822
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001823static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001824 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001825 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001826 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001827 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001828 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001829 .ndo_set_mac_address = igb_set_mac,
1830 .ndo_change_mtu = igb_change_mtu,
1831 .ndo_do_ioctl = igb_ioctl,
1832 .ndo_tx_timeout = igb_tx_timeout,
1833 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001834 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1835 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001836 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1837 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1838 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1839 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001840#ifdef CONFIG_NET_POLL_CONTROLLER
1841 .ndo_poll_controller = igb_netpoll,
1842#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001843 .ndo_fix_features = igb_fix_features,
1844 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001845};
1846
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001847/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001848 * igb_probe - Device Initialization Routine
1849 * @pdev: PCI device information struct
1850 * @ent: entry in igb_pci_tbl
1851 *
1852 * Returns 0 on success, negative on failure
1853 *
1854 * igb_probe initializes an adapter identified by a pci_dev structure.
1855 * The OS initialization, configuring of the adapter private structure,
1856 * and a hardware reset occur.
1857 **/
1858static int __devinit igb_probe(struct pci_dev *pdev,
1859 const struct pci_device_id *ent)
1860{
1861 struct net_device *netdev;
1862 struct igb_adapter *adapter;
1863 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001864 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001865 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001866 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001867 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1868 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001869 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001870 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001871 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001872
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001873 /* Catch broken hardware that put the wrong VF device ID in
1874 * the PCIe SR-IOV capability.
1875 */
1876 if (pdev->is_virtfn) {
1877 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1878 pci_name(pdev), pdev->vendor, pdev->device);
1879 return -EINVAL;
1880 }
1881
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001882 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001883 if (err)
1884 return err;
1885
1886 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001887 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001888 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001889 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001890 if (!err)
1891 pci_using_dac = 1;
1892 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001893 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001894 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001895 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001896 if (err) {
1897 dev_err(&pdev->dev, "No usable DMA "
1898 "configuration, aborting\n");
1899 goto err_dma;
1900 }
1901 }
1902 }
1903
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001904 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1905 IORESOURCE_MEM),
1906 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001907 if (err)
1908 goto err_pci_reg;
1909
Frans Pop19d5afd2009-10-02 10:04:12 -07001910 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001911
Auke Kok9d5c8242008-01-24 02:22:38 -08001912 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001913 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001914
1915 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001916 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001917 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001918 if (!netdev)
1919 goto err_alloc_etherdev;
1920
1921 SET_NETDEV_DEV(netdev, &pdev->dev);
1922
1923 pci_set_drvdata(pdev, netdev);
1924 adapter = netdev_priv(netdev);
1925 adapter->netdev = netdev;
1926 adapter->pdev = pdev;
1927 hw = &adapter->hw;
1928 hw->back = adapter;
1929 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1930
1931 mmio_start = pci_resource_start(pdev, 0);
1932 mmio_len = pci_resource_len(pdev, 0);
1933
1934 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001935 hw->hw_addr = ioremap(mmio_start, mmio_len);
1936 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001937 goto err_ioremap;
1938
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001939 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001940 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001941 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001942
1943 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1944
1945 netdev->mem_start = mmio_start;
1946 netdev->mem_end = mmio_start + mmio_len;
1947
Auke Kok9d5c8242008-01-24 02:22:38 -08001948 /* PCI config space info */
1949 hw->vendor_id = pdev->vendor;
1950 hw->device_id = pdev->device;
1951 hw->revision_id = pdev->revision;
1952 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1953 hw->subsystem_device_id = pdev->subsystem_device;
1954
Auke Kok9d5c8242008-01-24 02:22:38 -08001955 /* Copy the default MAC, PHY and NVM function pointers */
1956 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1957 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1958 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1959 /* Initialize skew-specific constants */
1960 err = ei->get_invariants(hw);
1961 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001962 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001963
Alexander Duyck450c87c2009-02-06 23:22:11 +00001964 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001965 err = igb_sw_init(adapter);
1966 if (err)
1967 goto err_sw_init;
1968
1969 igb_get_bus_info_pcie(hw);
1970
1971 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001972
1973 /* Copper options */
1974 if (hw->phy.media_type == e1000_media_type_copper) {
1975 hw->phy.mdix = AUTO_ALL_MODES;
1976 hw->phy.disable_polarity_correction = false;
1977 hw->phy.ms_type = e1000_ms_hw_default;
1978 }
1979
1980 if (igb_check_reset_block(hw))
1981 dev_info(&pdev->dev,
1982 "PHY reset is blocked due to SOL/IDER session.\n");
1983
Alexander Duyck077887c2011-08-26 07:46:29 +00001984 /*
1985 * features is initialized to 0 in allocation, it might have bits
1986 * set by igb_sw_init so we should use an or instead of an
1987 * assignment.
1988 */
1989 netdev->features |= NETIF_F_SG |
1990 NETIF_F_IP_CSUM |
1991 NETIF_F_IPV6_CSUM |
1992 NETIF_F_TSO |
1993 NETIF_F_TSO6 |
1994 NETIF_F_RXHASH |
1995 NETIF_F_RXCSUM |
1996 NETIF_F_HW_VLAN_RX |
1997 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001998
Alexander Duyck077887c2011-08-26 07:46:29 +00001999 /* copy netdev features into list of user selectable features */
2000 netdev->hw_features |= netdev->features;
Auke Kok9d5c8242008-01-24 02:22:38 -08002001
Alexander Duyck077887c2011-08-26 07:46:29 +00002002 /* set this bit last since it cannot be part of hw_features */
2003 netdev->features |= NETIF_F_HW_VLAN_FILTER;
2004
2005 netdev->vlan_features |= NETIF_F_TSO |
2006 NETIF_F_TSO6 |
2007 NETIF_F_IP_CSUM |
2008 NETIF_F_IPV6_CSUM |
2009 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002010
Yi Zou7b872a52010-09-22 17:57:58 +00002011 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002012 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00002013 netdev->vlan_features |= NETIF_F_HIGHDMA;
2014 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002015
Michał Mirosławac52caa2011-06-08 08:38:01 +00002016 if (hw->mac.type >= e1000_82576) {
2017 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002018 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002019 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002020
Jiri Pirko01789342011-08-16 06:29:00 +00002021 netdev->priv_flags |= IFF_UNICAST_FLT;
2022
Alexander Duyck330a6d62009-10-27 23:51:35 +00002023 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002024
2025 /* before reading the NVM, reset the controller to put the device in a
2026 * known good starting state */
2027 hw->mac.ops.reset_hw(hw);
2028
2029 /* make sure the NVM is good */
Carolyn Wyborny4322e562011-03-11 20:43:18 -08002030 if (hw->nvm.ops.validate(hw) < 0) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002031 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2032 err = -EIO;
2033 goto err_eeprom;
2034 }
2035
2036 /* copy the MAC address out of the NVM */
2037 if (hw->mac.ops.read_mac_addr(hw))
2038 dev_err(&pdev->dev, "NVM Read Error\n");
2039
2040 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2041 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2042
2043 if (!is_valid_ether_addr(netdev->perm_addr)) {
2044 dev_err(&pdev->dev, "Invalid MAC Address\n");
2045 err = -EIO;
2046 goto err_eeprom;
2047 }
2048
Joe Perchesc061b182010-08-23 18:20:03 +00002049 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002050 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002051 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002052 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002053
2054 INIT_WORK(&adapter->reset_task, igb_reset_task);
2055 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2056
Alexander Duyck450c87c2009-02-06 23:22:11 +00002057 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002058 adapter->fc_autoneg = true;
2059 hw->mac.autoneg = true;
2060 hw->phy.autoneg_advertised = 0x2f;
2061
Alexander Duyck0cce1192009-07-23 18:10:24 +00002062 hw->fc.requested_mode = e1000_fc_default;
2063 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002064
Auke Kok9d5c8242008-01-24 02:22:38 -08002065 igb_validate_mdi_setting(hw);
2066
Auke Kok9d5c8242008-01-24 02:22:38 -08002067	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
2068 * enable the ACPI Magic Packet filter
2069 */
2070
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002071 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002072 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002073 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002074 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2075 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2076 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002077 else if (hw->bus.func == 1)
2078 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002079
2080 if (eeprom_data & eeprom_apme_mask)
2081 adapter->eeprom_wol |= E1000_WUFC_MAG;
2082
2083 /* now that we have the eeprom settings, apply the special cases where
2084 * the eeprom may be wrong or the board simply won't support wake on
2085 * lan on a particular port */
2086 switch (pdev->device) {
2087 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2088 adapter->eeprom_wol = 0;
2089 break;
2090 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002091 case E1000_DEV_ID_82576_FIBER:
2092 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002093 /* Wake events only supported on port A for dual fiber
2094 * regardless of eeprom setting */
2095 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2096 adapter->eeprom_wol = 0;
2097 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002098 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002099 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002100 /* if quad port adapter, disable WoL on all but port A */
2101 if (global_quad_port_a != 0)
2102 adapter->eeprom_wol = 0;
2103 else
2104 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2105 /* Reset for multiple quad port adapters */
2106 if (++global_quad_port_a == 4)
2107 global_quad_port_a = 0;
2108 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002109 }
2110
2111 /* initialize the wol settings based on the eeprom settings */
2112 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002113 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002114
2115 /* reset the hardware with the new settings */
2116 igb_reset(adapter);
2117
2118 /* let the f/w know that the h/w is now under the control of the
2119 * driver. */
2120 igb_get_hw_control(adapter);
2121
Auke Kok9d5c8242008-01-24 02:22:38 -08002122 strcpy(netdev->name, "eth%d");
2123 err = register_netdev(netdev);
2124 if (err)
2125 goto err_register;
2126
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002127 /* carrier off reporting is important to ethtool even BEFORE open */
2128 netif_carrier_off(netdev);
2129
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002130#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002131 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002132 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002133 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002134 igb_setup_dca(adapter);
2135 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002136
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002137#endif
Anders Berggren673b8b72011-02-04 07:32:32 +00002138 /* do hw tstamp init after resetting */
2139 igb_init_hw_timer(adapter);
2140
Auke Kok9d5c8242008-01-24 02:22:38 -08002141 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2142 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002143 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002144 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002145 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002146 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002147 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002148 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2149 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2150 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2151 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002152 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002153
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002154 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2155 if (ret_val)
2156 strcpy(part_str, "Unknown");
2157 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002158 dev_info(&pdev->dev,
2159 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2160 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002161 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002162 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002163 switch (hw->mac.type) {
2164 case e1000_i350:
2165 igb_set_eee_i350(hw);
2166 break;
2167 default:
2168 break;
2169 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002170 return 0;
2171
2172err_register:
2173 igb_release_hw_control(adapter);
2174err_eeprom:
2175 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002176 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002177
2178 if (hw->flash_address)
2179 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002180err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002181 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002182 iounmap(hw->hw_addr);
2183err_ioremap:
2184 free_netdev(netdev);
2185err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002186 pci_release_selected_regions(pdev,
2187 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002188err_pci_reg:
2189err_dma:
2190 pci_disable_device(pdev);
2191 return err;
2192}
2193
2194/**
2195 * igb_remove - Device Removal Routine
2196 * @pdev: PCI device information struct
2197 *
2198 * igb_remove is called by the PCI subsystem to alert the driver
2199 * that it should release a PCI device. This could be caused by a
2200 * Hot-Plug event, or because the driver is going to be removed from
2201 * memory.
2202 **/
2203static void __devexit igb_remove(struct pci_dev *pdev)
2204{
2205 struct net_device *netdev = pci_get_drvdata(pdev);
2206 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002207 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002208
Tejun Heo760141a2010-12-12 16:45:14 +01002209 /*
2210 * The watchdog timer may be rescheduled, so explicitly
2211 * disable watchdog from being rescheduled.
2212 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002213 set_bit(__IGB_DOWN, &adapter->state);
2214 del_timer_sync(&adapter->watchdog_timer);
2215 del_timer_sync(&adapter->phy_info_timer);
2216
Tejun Heo760141a2010-12-12 16:45:14 +01002217 cancel_work_sync(&adapter->reset_task);
2218 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002219
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002220#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002221 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002222 dev_info(&pdev->dev, "DCA disabled\n");
2223 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002224 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002225 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002226 }
2227#endif
2228
Auke Kok9d5c8242008-01-24 02:22:38 -08002229 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2230 * would have already happened in close and is redundant. */
2231 igb_release_hw_control(adapter);
2232
2233 unregister_netdev(netdev);
2234
Alexander Duyck047e0032009-10-27 15:49:27 +00002235 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002236
Alexander Duyck37680112009-02-19 20:40:30 -08002237#ifdef CONFIG_PCI_IOV
2238 /* reclaim resources allocated to VFs */
2239 if (adapter->vf_data) {
2240 /* disable iov and allow time for transactions to clear */
Greg Rose0224d662011-10-14 02:57:14 +00002241 if (!igb_check_vf_assignment(adapter)) {
2242 pci_disable_sriov(pdev);
2243 msleep(500);
2244 } else {
2245 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2246 }
Alexander Duyck37680112009-02-19 20:40:30 -08002247
2248 kfree(adapter->vf_data);
2249 adapter->vf_data = NULL;
2250 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002251 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002252 msleep(100);
2253 dev_info(&pdev->dev, "IOV Disabled\n");
2254 }
2255#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002256
Alexander Duyck28b07592009-02-06 23:20:31 +00002257 iounmap(hw->hw_addr);
2258 if (hw->flash_address)
2259 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002260 pci_release_selected_regions(pdev,
2261 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002262
2263 free_netdev(netdev);
2264
Frans Pop19d5afd2009-10-02 10:04:12 -07002265 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002266
Auke Kok9d5c8242008-01-24 02:22:38 -08002267 pci_disable_device(pdev);
2268}
2269
2270/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002271 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2272 * @adapter: board private structure to initialize
2273 *
2274 * This function initializes the vf specific data storage and then attempts to
2275 * allocate the VFs. The reason for ordering it this way is that it is much
2276 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2277 * the memory for the VFs.
2278 **/
2279static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2280{
2281#ifdef CONFIG_PCI_IOV
2282 struct pci_dev *pdev = adapter->pdev;
Greg Rose0224d662011-10-14 02:57:14 +00002283 int old_vfs = igb_find_enabled_vfs(adapter);
2284 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002285
Greg Rose0224d662011-10-14 02:57:14 +00002286 if (old_vfs) {
2287 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2288 "max_vfs setting of %d\n", old_vfs, max_vfs);
2289 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002290 }
2291
Greg Rose0224d662011-10-14 02:57:14 +00002292 if (!adapter->vfs_allocated_count)
2293 return;
2294
2295 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2296 sizeof(struct vf_data_storage), GFP_KERNEL);
2297 /* if allocation failed then we do not support SR-IOV */
2298 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002299 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002300 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2301 "Data Storage\n");
2302 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002303 }
Greg Rose0224d662011-10-14 02:57:14 +00002304
2305 if (!old_vfs) {
2306 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2307 goto err_out;
2308 }
2309 dev_info(&pdev->dev, "%d VFs allocated\n",
2310 adapter->vfs_allocated_count);
2311 for (i = 0; i < adapter->vfs_allocated_count; i++)
2312 igb_vf_configure(adapter, i);
2313
2314 /* DMA Coalescing is not supported in IOV mode. */
2315 adapter->flags &= ~IGB_FLAG_DMAC;
2316 goto out;
2317err_out:
2318 kfree(adapter->vf_data);
2319 adapter->vf_data = NULL;
2320 adapter->vfs_allocated_count = 0;
2321out:
2322 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002323#endif /* CONFIG_PCI_IOV */
2324}
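/*
 * Example (assumed module usage, for illustration only): SR-IOV is
 * typically requested at load time through the max_vfs module parameter,
 * e.g.
 *
 *   modprobe igb max_vfs=7
 *
 * after which igb_probe_vfs() allocates vf_data and calls
 * pci_enable_sriov(), unless the VFs were already enabled by a previous
 * driver instance.
 */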
2325
Alexander Duyck115f4592009-11-12 18:37:00 +00002326/**
2327 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2328 * @adapter: board private structure to initialize
2329 *
2330 * igb_init_hw_timer initializes the function pointer and values for the hw
2331 * timer found in hardware.
2332 **/
2333static void igb_init_hw_timer(struct igb_adapter *adapter)
2334{
2335 struct e1000_hw *hw = &adapter->hw;
2336
2337 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002338 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002339 case e1000_82580:
2340 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2341 adapter->cycles.read = igb_read_clock;
2342 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2343 adapter->cycles.mult = 1;
2344 /*
2345	 * The 82580 timesync advances the system timer by 8ns every 8ns
2346 * and the value cannot be shifted. Instead we need to shift
2347 * the registers to generate a 64bit timer value. As a result
2348 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2349 * 24 in order to generate a larger value for synchronization.
2350 */
2351 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2352 /* disable system timer temporarily by setting bit 31 */
2353 wr32(E1000_TSAUXC, 0x80000000);
2354 wrfl();
2355
2356 /* Set registers so that rollover occurs soon to test this. */
2357 wr32(E1000_SYSTIMR, 0x00000000);
2358 wr32(E1000_SYSTIML, 0x80000000);
2359 wr32(E1000_SYSTIMH, 0x000000FF);
2360 wrfl();
2361
2362 /* enable system timer by clearing bit 31 */
2363 wr32(E1000_TSAUXC, 0x0);
2364 wrfl();
2365
2366 timecounter_init(&adapter->clock,
2367 &adapter->cycles,
2368 ktime_to_ns(ktime_get_real()));
2369 /*
2370 * Synchronize our NIC clock against system wall clock. NIC
2371 * time stamp reading requires ~3us per sample, each sample
2372 * was pretty stable even under load => only require 10
2373 * samples for each offset comparison.
2374 */
2375 memset(&adapter->compare, 0, sizeof(adapter->compare));
2376 adapter->compare.source = &adapter->clock;
2377 adapter->compare.target = ktime_get_real;
2378 adapter->compare.num_samples = 10;
2379 timecompare_update(&adapter->compare, 0);
2380 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00002381 case e1000_82576:
2382 /*
2383 * Initialize hardware timer: we keep it running just in case
2384 * that some program needs it later on.
2385 */
2386 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2387 adapter->cycles.read = igb_read_clock;
2388 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2389 adapter->cycles.mult = 1;
2390 /**
2391 * Scale the NIC clock cycle by a large factor so that
2392 * relatively small clock corrections can be added or
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002393 * subtracted at each clock tick. The drawbacks of a large
Alexander Duyck115f4592009-11-12 18:37:00 +00002394 * factor are a) that the clock register overflows more quickly
2395 * (not such a big deal) and b) that the increment per tick has
2396 * to fit into 24 bits. As a result we need to use a shift of
2397 * 19 so we can fit a value of 16 into the TIMINCA register.
2398 */
2399 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2400 wr32(E1000_TIMINCA,
2401 (1 << E1000_TIMINCA_16NS_SHIFT) |
2402 (16 << IGB_82576_TSYNC_SHIFT));
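		/*
		 * Illustrative check of the 24-bit constraint described
		 * above: the increment programmed here is 16 << 19 =
		 * 8388608 = 2^23, which just fits in the 24-bit TIMINCA
		 * increment field.
		 */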
2403
2404 /* Set registers so that rollover occurs soon to test this. */
2405 wr32(E1000_SYSTIML, 0x00000000);
2406 wr32(E1000_SYSTIMH, 0xFF800000);
2407 wrfl();
2408
2409 timecounter_init(&adapter->clock,
2410 &adapter->cycles,
2411 ktime_to_ns(ktime_get_real()));
2412 /*
2413 * Synchronize our NIC clock against system wall clock. NIC
2414 * time stamp reading requires ~3us per sample, each sample
2415 * was pretty stable even under load => only require 10
2416 * samples for each offset comparison.
2417 */
2418 memset(&adapter->compare, 0, sizeof(adapter->compare));
2419 adapter->compare.source = &adapter->clock;
2420 adapter->compare.target = ktime_get_real;
2421 adapter->compare.num_samples = 10;
2422 timecompare_update(&adapter->compare, 0);
2423 break;
2424 case e1000_82575:
2425 /* 82575 does not support timesync */
2426 default:
2427 break;
2428 }
2429
2430}
2431
Alexander Duycka6b623e2009-10-27 23:47:53 +00002432/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002433 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2434 * @adapter: board private structure to initialize
2435 *
2436 * igb_sw_init initializes the Adapter private data structure.
2437 * Fields are initialized based on PCI device information and
2438 * OS network device settings (MTU size).
2439 **/
2440static int __devinit igb_sw_init(struct igb_adapter *adapter)
2441{
2442 struct e1000_hw *hw = &adapter->hw;
2443 struct net_device *netdev = adapter->netdev;
2444 struct pci_dev *pdev = adapter->pdev;
2445
2446 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2447
Alexander Duyck13fde972011-10-05 13:35:24 +00002448 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002449 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2450 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002451
2452 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002453 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2454 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2455
Alexander Duyck13fde972011-10-05 13:35:24 +00002456 /* set default work limits */
2457 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2458
Alexander Duyck153285f2011-08-26 07:43:32 +00002459 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2460 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002461 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2462
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002463 adapter->node = -1;
2464
Eric Dumazet12dcd862010-10-15 17:27:10 +00002465 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002466#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002467 switch (hw->mac.type) {
2468 case e1000_82576:
2469 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002470 if (max_vfs > 7) {
2471 dev_warn(&pdev->dev,
2472 "Maximum of 7 VFs per PF, using max\n");
2473 adapter->vfs_allocated_count = 7;
2474 } else
2475 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002476 break;
2477 default:
2478 break;
2479 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002480#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00002481 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
Williams, Mitch A665c8c82011-06-07 14:22:57 -07002482 /* i350 cannot do RSS and SR-IOV at the same time */
2483 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2484 adapter->rss_queues = 1;
Alexander Duycka99955f2009-11-12 18:37:19 +00002485
2486 /*
2487	 * If rss_queues > 4, or if VFs are going to be allocated alongside
2488	 * RSS queues, combine the queues into queue pairs in order to
2489	 * conserve interrupts, which are in limited supply.
2490 */
2491 if ((adapter->rss_queues > 4) ||
2492 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2493 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
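	/*
	 * Worked example (illustrative): with 8 RSS queues and no VFs,
	 * unpaired operation would need 8 Rx + 8 Tx = 16 q_vectors, while
	 * IGB_FLAG_QUEUE_PAIRS lets one q_vector service an Rx/Tx pair,
	 * so 8 q_vectors (plus the one non-queue MSI-X vector) suffice.
	 */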
2494
Alexander Duycka6b623e2009-10-27 23:47:53 +00002495 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002496 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002497 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2498 return -ENOMEM;
2499 }
2500
Alexander Duycka6b623e2009-10-27 23:47:53 +00002501 igb_probe_vfs(adapter);
2502
Auke Kok9d5c8242008-01-24 02:22:38 -08002503 /* Explicitly disable IRQ since the NIC can be in any state. */
2504 igb_irq_disable(adapter);
2505
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002506 if (hw->mac.type == e1000_i350)
2507 adapter->flags &= ~IGB_FLAG_DMAC;
2508
Auke Kok9d5c8242008-01-24 02:22:38 -08002509 set_bit(__IGB_DOWN, &adapter->state);
2510 return 0;
2511}
2512
2513/**
2514 * igb_open - Called when a network interface is made active
2515 * @netdev: network interface device structure
2516 *
2517 * Returns 0 on success, negative value on failure
2518 *
2519 * The open entry point is called when a network interface is made
2520 * active by the system (IFF_UP). At this point all resources needed
2521 * for transmit and receive operations are allocated, the interrupt
2522 * handler is registered with the OS, the watchdog timer is started,
2523 * and the stack is notified that the interface is ready.
2524 **/
2525static int igb_open(struct net_device *netdev)
2526{
2527 struct igb_adapter *adapter = netdev_priv(netdev);
2528 struct e1000_hw *hw = &adapter->hw;
2529 int err;
2530 int i;
2531
2532 /* disallow open during test */
2533 if (test_bit(__IGB_TESTING, &adapter->state))
2534 return -EBUSY;
2535
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002536 netif_carrier_off(netdev);
2537
Auke Kok9d5c8242008-01-24 02:22:38 -08002538 /* allocate transmit descriptors */
2539 err = igb_setup_all_tx_resources(adapter);
2540 if (err)
2541 goto err_setup_tx;
2542
2543 /* allocate receive descriptors */
2544 err = igb_setup_all_rx_resources(adapter);
2545 if (err)
2546 goto err_setup_rx;
2547
Nick Nunley88a268c2010-02-17 01:01:59 +00002548 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002549
Auke Kok9d5c8242008-01-24 02:22:38 -08002550 /* before we allocate an interrupt, we must be ready to handle it.
2551 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2552 * as soon as we call pci_request_irq, so we have to setup our
2553 * clean_rx handler before we do so. */
2554 igb_configure(adapter);
2555
2556 err = igb_request_irq(adapter);
2557 if (err)
2558 goto err_req_irq;
2559
2560 /* From here on the code is the same as igb_up() */
2561 clear_bit(__IGB_DOWN, &adapter->state);
2562
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002563 for (i = 0; i < adapter->num_q_vectors; i++)
2564 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002565
2566 /* Clear any pending interrupts. */
2567 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002568
2569 igb_irq_enable(adapter);
2570
Alexander Duyckd4960302009-10-27 15:53:45 +00002571 /* notify VFs that reset has been completed */
2572 if (adapter->vfs_allocated_count) {
2573 u32 reg_data = rd32(E1000_CTRL_EXT);
2574 reg_data |= E1000_CTRL_EXT_PFRSTD;
2575 wr32(E1000_CTRL_EXT, reg_data);
2576 }
2577
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002578 netif_tx_start_all_queues(netdev);
2579
Alexander Duyck25568a52009-10-27 23:49:59 +00002580 /* start the watchdog. */
2581 hw->mac.get_link_status = 1;
2582 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002583
2584 return 0;
2585
2586err_req_irq:
2587 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002588 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002589 igb_free_all_rx_resources(adapter);
2590err_setup_rx:
2591 igb_free_all_tx_resources(adapter);
2592err_setup_tx:
2593 igb_reset(adapter);
2594
2595 return err;
2596}
2597
2598/**
2599 * igb_close - Disables a network interface
2600 * @netdev: network interface device structure
2601 *
2602 * Returns 0, this is not allowed to fail
2603 *
2604 * The close entry point is called when an interface is de-activated
2605 * by the OS. The hardware is still under the driver's control, but
2606 * needs to be disabled. A global MAC reset is issued to stop the
2607 * hardware, and all transmit and receive resources are freed.
2608 **/
2609static int igb_close(struct net_device *netdev)
2610{
2611 struct igb_adapter *adapter = netdev_priv(netdev);
2612
2613 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2614 igb_down(adapter);
2615
2616 igb_free_irq(adapter);
2617
2618 igb_free_all_tx_resources(adapter);
2619 igb_free_all_rx_resources(adapter);
2620
Auke Kok9d5c8242008-01-24 02:22:38 -08002621 return 0;
2622}
2623
2624/**
2625 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002626 * @tx_ring: tx descriptor ring (for a specific queue) to set up
2627 *
2628 * Return 0 on success, negative on failure
2629 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002630int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002631{
Alexander Duyck59d71982010-04-27 13:09:25 +00002632 struct device *dev = tx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002633 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002634 int size;
2635
Alexander Duyck06034642011-08-26 07:44:22 +00002636 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
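	/* try the ring's preferred NUMA node first; if the node-local
	 * allocation fails we fall back to any node below */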
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002637 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2638 if (!tx_ring->tx_buffer_info)
2639 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002640 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002641 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002642
2643 /* round up to nearest 4K */
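	/* (each advanced Tx descriptor is 16 bytes, so e.g. a 256-entry
	 * ring fills exactly one 4K page) */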
Alexander Duyck85e8d002009-02-16 00:00:20 -08002644 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002645 tx_ring->size = ALIGN(tx_ring->size, 4096);
2646
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002647 set_dev_node(dev, tx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002648 tx_ring->desc = dma_alloc_coherent(dev,
2649 tx_ring->size,
2650 &tx_ring->dma,
2651 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002652 set_dev_node(dev, orig_node);
2653 if (!tx_ring->desc)
2654 tx_ring->desc = dma_alloc_coherent(dev,
2655 tx_ring->size,
2656 &tx_ring->dma,
2657 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002658
2659 if (!tx_ring->desc)
2660 goto err;
2661
Auke Kok9d5c8242008-01-24 02:22:38 -08002662 tx_ring->next_to_use = 0;
2663 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002664
Auke Kok9d5c8242008-01-24 02:22:38 -08002665 return 0;
2666
2667err:
Alexander Duyck06034642011-08-26 07:44:22 +00002668 vfree(tx_ring->tx_buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002669 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002670 "Unable to allocate memory for the transmit descriptor ring\n");
2671 return -ENOMEM;
2672}
2673
2674/**
2675 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2676 * (Descriptors) for all queues
2677 * @adapter: board private structure
2678 *
2679 * Return 0 on success, negative on failure
2680 **/
2681static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2682{
Alexander Duyck439705e2009-10-27 23:49:20 +00002683 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002684 int i, err = 0;
2685
2686 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002687 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002688 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002689 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002690 "Allocation for Tx Queue %u failed\n", i);
2691 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002692 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002693 break;
2694 }
2695 }
2696
2697 return err;
2698}
2699
2700/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002701 * igb_setup_tctl - configure the transmit control registers
2702 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002703 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002704void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002705{
Auke Kok9d5c8242008-01-24 02:22:38 -08002706 struct e1000_hw *hw = &adapter->hw;
2707 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002708
Alexander Duyck85b430b2009-10-27 15:50:29 +00002709 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2710 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002711
2712 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002713 tctl = rd32(E1000_TCTL);
2714 tctl &= ~E1000_TCTL_CT;
2715 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2716 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2717
2718 igb_config_collision_dist(hw);
2719
Auke Kok9d5c8242008-01-24 02:22:38 -08002720 /* Enable transmits */
2721 tctl |= E1000_TCTL_EN;
2722
2723 wr32(E1000_TCTL, tctl);
2724}
2725
2726/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002727 * igb_configure_tx_ring - Configure transmit ring after Reset
2728 * @adapter: board private structure
2729 * @ring: tx ring to configure
2730 *
2731 * Configure a transmit ring after a reset.
2732 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002733void igb_configure_tx_ring(struct igb_adapter *adapter,
2734 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002735{
2736 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002737 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002738 u64 tdba = ring->dma;
2739 int reg_idx = ring->reg_idx;
2740
2741 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002742 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002743 wrfl();
2744 mdelay(10);
2745
2746 wr32(E1000_TDLEN(reg_idx),
2747 ring->count * sizeof(union e1000_adv_tx_desc));
2748 wr32(E1000_TDBAL(reg_idx),
2749 tdba & 0x00000000ffffffffULL);
2750 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2751
Alexander Duyckfce99e32009-10-27 15:51:27 +00002752 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002753 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002754 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002755
2756 txdctl |= IGB_TX_PTHRESH;
2757 txdctl |= IGB_TX_HTHRESH << 8;
2758 txdctl |= IGB_TX_WTHRESH << 16;
2759
2760 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2761 wr32(E1000_TXDCTL(reg_idx), txdctl);
2762}
2763
2764/**
2765 * igb_configure_tx - Configure transmit Unit after Reset
2766 * @adapter: board private structure
2767 *
2768 * Configure the Tx unit of the MAC after a reset.
2769 **/
2770static void igb_configure_tx(struct igb_adapter *adapter)
2771{
2772 int i;
2773
2774 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002775 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002776}
2777
2778/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002779 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002780 * @rx_ring: rx descriptor ring (for a specific queue) to set up
2781 *
2782 * Returns 0 on success, negative on failure
2783 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002784int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002785{
Alexander Duyck59d71982010-04-27 13:09:25 +00002786 struct device *dev = rx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002787 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002788 int size, desc_len;
2789
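	/* as on the Tx side, prefer the ring's NUMA node and fall back
	 * to any node if the node-local allocation fails */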
Alexander Duyck06034642011-08-26 07:44:22 +00002790 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002791 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2792 if (!rx_ring->rx_buffer_info)
2793 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002794 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002795 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002796
2797 desc_len = sizeof(union e1000_adv_rx_desc);
2798
2799 /* Round up to nearest 4K */
2800 rx_ring->size = rx_ring->count * desc_len;
2801 rx_ring->size = ALIGN(rx_ring->size, 4096);
2802
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002803 set_dev_node(dev, rx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002804 rx_ring->desc = dma_alloc_coherent(dev,
2805 rx_ring->size,
2806 &rx_ring->dma,
2807 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002808 set_dev_node(dev, orig_node);
2809 if (!rx_ring->desc)
2810 rx_ring->desc = dma_alloc_coherent(dev,
2811 rx_ring->size,
2812 &rx_ring->dma,
2813 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002814
2815 if (!rx_ring->desc)
2816 goto err;
2817
2818 rx_ring->next_to_clean = 0;
2819 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002820
Auke Kok9d5c8242008-01-24 02:22:38 -08002821 return 0;
2822
2823err:
Alexander Duyck06034642011-08-26 07:44:22 +00002824 vfree(rx_ring->rx_buffer_info);
2825 rx_ring->rx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002826	dev_err(dev,
2827		"Unable to allocate memory for the receive descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002828 return -ENOMEM;
2829}
2830
2831/**
2832 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2833 * (Descriptors) for all queues
2834 * @adapter: board private structure
2835 *
2836 * Return 0 on success, negative on failure
2837 **/
2838static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2839{
Alexander Duyck439705e2009-10-27 23:49:20 +00002840 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002841 int i, err = 0;
2842
2843 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002844 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002845 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002846 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002847 "Allocation for Rx Queue %u failed\n", i);
2848 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002849 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002850 break;
2851 }
2852 }
2853
2854 return err;
2855}
2856
2857/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002858 * igb_setup_mrqc - configure the multiple receive queue control registers
2859 * @adapter: Board private structure
2860 **/
2861static void igb_setup_mrqc(struct igb_adapter *adapter)
2862{
2863 struct e1000_hw *hw = &adapter->hw;
2864 u32 mrqc, rxcsum;
2865 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2866 union e1000_reta {
2867 u32 dword;
2868 u8 bytes[4];
2869 } reta;
2870 static const u8 rsshash[40] = {
2871 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2872 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2873 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2874 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2875
2876 /* Fill out hash function seeds */
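	/* each of the 10 RSSRK registers takes 4 key bytes packed
	 * little-endian, covering the 40-byte key above */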
2877 for (j = 0; j < 10; j++) {
2878 u32 rsskey = rsshash[(j * 4)];
2879 rsskey |= rsshash[(j * 4) + 1] << 8;
2880 rsskey |= rsshash[(j * 4) + 2] << 16;
2881 rsskey |= rsshash[(j * 4) + 3] << 24;
2882 array_wr32(E1000_RSSRK(0), j, rsskey);
2883 }
2884
Alexander Duycka99955f2009-11-12 18:37:19 +00002885 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002886
2887 if (adapter->vfs_allocated_count) {
2888		/* 82575 and 82576 support 2 RSS queues for VMDq */
2889 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002890 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002891 case e1000_82580:
2892 num_rx_queues = 1;
2893 shift = 0;
2894 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002895 case e1000_82576:
2896 shift = 3;
2897 num_rx_queues = 2;
2898 break;
2899 case e1000_82575:
2900 shift = 2;
2901 shift2 = 6;
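			/* fall through */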
2902 default:
2903 break;
2904 }
2905 } else {
2906 if (hw->mac.type == e1000_82575)
2907 shift = 6;
2908 }
2909
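	/* program the 128-entry redirection table: each byte selects the
	 * queue for one hash bucket, e.g. with num_rx_queues = 4 and
	 * shift = 0 the bytes simply cycle 0, 1, 2, 3 */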
2910 for (j = 0; j < (32 * 4); j++) {
2911 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2912 if (shift2)
2913 reta.bytes[j & 3] |= num_rx_queues << shift2;
2914 if ((j & 3) == 3)
2915 wr32(E1000_RETA(j >> 2), reta.dword);
2916 }
2917
2918 /*
2919 * Disable raw packet checksumming so that RSS hash is placed in
2920 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2921 * offloads as they are enabled by default
2922 */
2923 rxcsum = rd32(E1000_RXCSUM);
2924 rxcsum |= E1000_RXCSUM_PCSD;
2925
2926 if (adapter->hw.mac.type >= e1000_82576)
2927 /* Enable Receive Checksum Offload for SCTP */
2928 rxcsum |= E1000_RXCSUM_CRCOFL;
2929
2930 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2931 wr32(E1000_RXCSUM, rxcsum);
2932
2933 /* If VMDq is enabled then we set the appropriate mode for that, else
2934 * we default to RSS so that an RSS hash is calculated per packet even
2935 * if we are only using one queue */
2936 if (adapter->vfs_allocated_count) {
2937 if (hw->mac.type > e1000_82575) {
2938 /* Set the default pool for the PF's first queue */
2939 u32 vtctl = rd32(E1000_VT_CTL);
2940 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2941 E1000_VT_CTL_DISABLE_DEF_POOL);
2942 vtctl |= adapter->vfs_allocated_count <<
2943 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2944 wr32(E1000_VT_CTL, vtctl);
2945 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002946 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002947 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2948 else
2949 mrqc = E1000_MRQC_ENABLE_VMDQ;
2950 } else {
2951 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2952 }
2953 igb_vmm_control(adapter);
2954
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002955 /*
2956 * Generate RSS hash based on TCP port numbers and/or
2957 * IPv4/v6 src and dst addresses since UDP cannot be
2958 * hashed reliably due to IP fragmentation
2959 */
2960 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2961 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2962 E1000_MRQC_RSS_FIELD_IPV6 |
2963 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2964 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002965
2966 wr32(E1000_MRQC, mrqc);
2967}
2968
2969/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002970 * igb_setup_rctl - configure the receive control registers
2971 * @adapter: Board private structure
2972 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002973void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002974{
2975 struct e1000_hw *hw = &adapter->hw;
2976 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002977
2978 rctl = rd32(E1000_RCTL);
2979
2980 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002981 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002982
Alexander Duyck69d728b2008-11-25 01:04:03 -08002983 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002984 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002985
Auke Kok87cb7e82008-07-08 15:08:29 -07002986 /*
2987 * enable stripping of CRC. It's unlikely this will break BMC
2988 * redirection as it did with e1000. Newer features require
2989 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002990 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002991 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002992
Alexander Duyck559e9c42009-10-27 23:52:50 +00002993 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002994 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002995
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002996 /* enable LPE to prevent packets larger than max_frame_size */
2997 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002998
Alexander Duyck952f72a2009-10-27 15:51:07 +00002999 /* disable queue 0 to prevent tail write w/o re-config */
3000 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003001
Alexander Duycke1739522009-02-19 20:39:44 -08003002	/* Attention!!! For SR-IOV PF driver operations you must enable
3003	 * queue drop for all VF and PF queues to prevent head-of-line blocking
3004	 * if an untrusted VF does not provide descriptors to hardware.
3005 */
3006 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08003007 /* set all queue drop enable bits */
3008 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08003009 }
3010
Auke Kok9d5c8242008-01-24 02:22:38 -08003011 wr32(E1000_RCTL, rctl);
3012}
3013
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003014static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3015 int vfn)
3016{
3017 struct e1000_hw *hw = &adapter->hw;
3018 u32 vmolr;
3019
3020	/* if it isn't the PF, check to see if VFs are enabled and
3021	 * increase the size to support VLAN tags */
3022 if (vfn < adapter->vfs_allocated_count &&
3023 adapter->vf_data[vfn].vlans_enabled)
3024 size += VLAN_TAG_SIZE;
3025
3026 vmolr = rd32(E1000_VMOLR(vfn));
3027 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3028 vmolr |= size | E1000_VMOLR_LPE;
3029 wr32(E1000_VMOLR(vfn), vmolr);
3030
3031 return 0;
3032}
3033
Auke Kok9d5c8242008-01-24 02:22:38 -08003034/**
Alexander Duycke1739522009-02-19 20:39:44 -08003035 * igb_rlpml_set - set maximum receive packet size
3036 * @adapter: board private structure
3037 *
3038 * Configure maximum receivable packet size.
3039 **/
3040static void igb_rlpml_set(struct igb_adapter *adapter)
3041{
Alexander Duyck153285f2011-08-26 07:43:32 +00003042 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003043 struct e1000_hw *hw = &adapter->hw;
3044 u16 pf_id = adapter->vfs_allocated_count;
3045
Alexander Duycke1739522009-02-19 20:39:44 -08003046 if (pf_id) {
3047 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00003048 /*
3049 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3050 * to our max jumbo frame size, in case we need to enable
3051 * jumbo frames on one of the rings later.
3052 * This will not pass over-length frames into the default
3053 * queue because it's gated by the VMOLR.RLPML.
3054 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003055 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003056 }
3057
3058 wr32(E1000_RLPML, max_frame_size);
3059}
3060
Williams, Mitch A8151d292010-02-10 01:44:24 +00003061static inline void igb_set_vmolr(struct igb_adapter *adapter,
3062 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003063{
3064 struct e1000_hw *hw = &adapter->hw;
3065 u32 vmolr;
3066
3067	 * This register exists only on 82576 and newer, so if the hardware
3068	 * is older we should exit and do nothing
3069 * we should exit and do nothing
3070 */
3071 if (hw->mac.type < e1000_82576)
3072 return;
3073
3074 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003075 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3076 if (aupe)
3077 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3078 else
3079 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003080
3081 /* clear all bits that might not be set */
3082 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3083
Alexander Duycka99955f2009-11-12 18:37:19 +00003084 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003085 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3086 /*
3087 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3088 * multicast packets
3089 */
3090 if (vfn <= adapter->vfs_allocated_count)
3091 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3092
3093 wr32(E1000_VMOLR(vfn), vmolr);
3094}
3095
Alexander Duycke1739522009-02-19 20:39:44 -08003096/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003097 * igb_configure_rx_ring - Configure a receive ring after Reset
3098 * @adapter: board private structure
3099 * @ring: receive ring to be configured
3100 *
3101 * Configure the Rx unit of the MAC after a reset.
3102 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003103void igb_configure_rx_ring(struct igb_adapter *adapter,
3104 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003105{
3106 struct e1000_hw *hw = &adapter->hw;
3107 u64 rdba = ring->dma;
3108 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003109 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003110
3111 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003112 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003113
3114 /* Set DMA base address registers */
3115 wr32(E1000_RDBAL(reg_idx),
3116 rdba & 0x00000000ffffffffULL);
3117 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3118 wr32(E1000_RDLEN(reg_idx),
3119 ring->count * sizeof(union e1000_adv_rx_desc));
3120
3121 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003122 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003123 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003124 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003125
Alexander Duyck952f72a2009-10-27 15:51:07 +00003126 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003127 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003128#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003129 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003130#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003131 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003132#endif
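	/* with 4K pages this selects half a page (2KB) per packet buffer,
	 * in addition to the IGB_RX_HDR_LEN header buffer set above */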
Alexander Duyck44390ca2011-08-26 07:43:38 +00003133 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Alexander Duyck06218a82011-08-26 07:46:55 +00003134 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00003135 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003136 /* Only set Drop Enable if we are supporting multiple queues */
3137 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3138 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003139
3140 wr32(E1000_SRRCTL(reg_idx), srrctl);
3141
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003142 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003143 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003144
Alexander Duyck85b430b2009-10-27 15:50:29 +00003145 rxdctl |= IGB_RX_PTHRESH;
3146 rxdctl |= IGB_RX_HTHRESH << 8;
3147 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003148
3149 /* enable receive descriptor fetching */
3150 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003151 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3152}
3153
3154/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003155 * igb_configure_rx - Configure receive Unit after Reset
3156 * @adapter: board private structure
3157 *
3158 * Configure the Rx unit of the MAC after a reset.
3159 **/
3160static void igb_configure_rx(struct igb_adapter *adapter)
3161{
Hannes Eder91075842009-02-18 19:36:04 -08003162 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003163
Alexander Duyck68d480c2009-10-05 06:33:08 +00003164 /* set UTA to appropriate mode */
3165 igb_set_uta(adapter);
3166
Alexander Duyck26ad9172009-10-05 06:32:49 +00003167 /* set the correct pool for the PF default MAC address in entry 0 */
3168 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3169 adapter->vfs_allocated_count);
3170
Alexander Duyck06cf2662009-10-27 15:53:25 +00003171	/* Set up the HW Rx Head and Tail Descriptor Pointers and
3172 * the Base and Length of the Rx Descriptor Ring */
3173 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003174 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003175}
3176
3177/**
3178 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003179 * @tx_ring: Tx descriptor ring for a specific queue
3180 *
3181 * Free all transmit software resources
3182 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003183void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003184{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003185 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003186
Alexander Duyck06034642011-08-26 07:44:22 +00003187 vfree(tx_ring->tx_buffer_info);
3188 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003189
Alexander Duyck439705e2009-10-27 23:49:20 +00003190 /* if not set, then don't free */
3191 if (!tx_ring->desc)
3192 return;
3193
Alexander Duyck59d71982010-04-27 13:09:25 +00003194 dma_free_coherent(tx_ring->dev, tx_ring->size,
3195 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003196
3197 tx_ring->desc = NULL;
3198}
3199
3200/**
3201 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3202 * @adapter: board private structure
3203 *
3204 * Free all transmit software resources
3205 **/
3206static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3207{
3208 int i;
3209
3210 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003211 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003212}
3213
Alexander Duyckebe42d12011-08-26 07:45:09 +00003214void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3215 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003216{
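	/* a buffer holding an skb is the first segment of a packet and is
	 * singly mapped; buffers without an skb are page-mapped fragments */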
Alexander Duyckebe42d12011-08-26 07:45:09 +00003217 if (tx_buffer->skb) {
3218 dev_kfree_skb_any(tx_buffer->skb);
3219 if (tx_buffer->dma)
3220 dma_unmap_single(ring->dev,
3221 tx_buffer->dma,
3222 tx_buffer->length,
3223 DMA_TO_DEVICE);
3224 } else if (tx_buffer->dma) {
3225 dma_unmap_page(ring->dev,
3226 tx_buffer->dma,
3227 tx_buffer->length,
3228 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003229 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003230 tx_buffer->next_to_watch = NULL;
3231 tx_buffer->skb = NULL;
3232 tx_buffer->dma = 0;
3233 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003234}
3235
3236/**
3237 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003238 * @tx_ring: ring to be cleaned
3239 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003240static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003241{
Alexander Duyck06034642011-08-26 07:44:22 +00003242 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003243 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003244 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003245
Alexander Duyck06034642011-08-26 07:44:22 +00003246 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003247 return;
3248 /* Free all the Tx ring sk_buffs */
3249
3250 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003251 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003252 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003253 }
3254
Alexander Duyck06034642011-08-26 07:44:22 +00003255 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3256 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003257
3258 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003259 memset(tx_ring->desc, 0, tx_ring->size);
3260
3261 tx_ring->next_to_use = 0;
3262 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003263}
3264
3265/**
3266 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3267 * @adapter: board private structure
3268 **/
3269static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3270{
3271 int i;
3272
3273 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003274 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003275}
3276
3277/**
3278 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003279 * @rx_ring: ring to clean the resources from
3280 *
3281 * Free all receive software resources
3282 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003283void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003284{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003285 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003286
Alexander Duyck06034642011-08-26 07:44:22 +00003287 vfree(rx_ring->rx_buffer_info);
3288 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003289
Alexander Duyck439705e2009-10-27 23:49:20 +00003290 /* if not set, then don't free */
3291 if (!rx_ring->desc)
3292 return;
3293
Alexander Duyck59d71982010-04-27 13:09:25 +00003294 dma_free_coherent(rx_ring->dev, rx_ring->size,
3295 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003296
3297 rx_ring->desc = NULL;
3298}
3299
3300/**
3301 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3302 * @adapter: board private structure
3303 *
3304 * Free all receive software resources
3305 **/
3306static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3307{
3308 int i;
3309
3310 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003311 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003312}
3313
3314/**
3315 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003316 * @rx_ring: ring to free buffers from
3317 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003318static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003319{
Auke Kok9d5c8242008-01-24 02:22:38 -08003320 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003321 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003322
Alexander Duyck06034642011-08-26 07:44:22 +00003323 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003324 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003325
Auke Kok9d5c8242008-01-24 02:22:38 -08003326 /* Free all the Rx ring sk_buffs */
3327 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003328 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003329 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003330 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003331 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003332 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003333 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003334 buffer_info->dma = 0;
3335 }
3336
3337 if (buffer_info->skb) {
3338 dev_kfree_skb(buffer_info->skb);
3339 buffer_info->skb = NULL;
3340 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003341 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003342 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003343 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003344 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003345 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003346 buffer_info->page_dma = 0;
3347 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003348 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003349 put_page(buffer_info->page);
3350 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003351 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003352 }
3353 }
3354
Alexander Duyck06034642011-08-26 07:44:22 +00003355 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3356 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003357
3358 /* Zero out the descriptor ring */
3359 memset(rx_ring->desc, 0, rx_ring->size);
3360
3361 rx_ring->next_to_clean = 0;
3362 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003363}
3364
3365/**
3366 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3367 * @adapter: board private structure
3368 **/
3369static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3370{
3371 int i;
3372
3373 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003374 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003375}
3376
3377/**
3378 * igb_set_mac - Change the Ethernet Address of the NIC
3379 * @netdev: network interface device structure
3380 * @p: pointer to an address structure
3381 *
3382 * Returns 0 on success, negative on failure
3383 **/
3384static int igb_set_mac(struct net_device *netdev, void *p)
3385{
3386 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003387 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003388 struct sockaddr *addr = p;
3389
3390 if (!is_valid_ether_addr(addr->sa_data))
3391 return -EADDRNOTAVAIL;
3392
3393 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003394 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003395
Alexander Duyck26ad9172009-10-05 06:32:49 +00003396 /* set the correct pool for the new PF MAC address in entry 0 */
3397 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3398 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003399
Auke Kok9d5c8242008-01-24 02:22:38 -08003400 return 0;
3401}
3402
3403/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003404 * igb_write_mc_addr_list - write multicast addresses to MTA
3405 * @netdev: network interface device structure
3406 *
3407 * Writes multicast address list to the MTA hash table.
3408 * Returns: -ENOMEM on failure
3409 * 0 on no addresses written
3410 * X on writing X addresses to MTA
3411 **/
3412static int igb_write_mc_addr_list(struct net_device *netdev)
3413{
3414 struct igb_adapter *adapter = netdev_priv(netdev);
3415 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003416 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003417 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003418 int i;
3419
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003420 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003421 /* nothing to program, so clear mc list */
3422 igb_update_mc_addr_list(hw, NULL, 0);
3423 igb_restore_vf_multicasts(adapter);
3424 return 0;
3425 }
3426
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003427	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003428 if (!mta_list)
3429 return -ENOMEM;
3430
Alexander Duyck68d480c2009-10-05 06:33:08 +00003431 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003432 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003433 netdev_for_each_mc_addr(ha, netdev)
3434 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003435
Alexander Duyck68d480c2009-10-05 06:33:08 +00003436 igb_update_mc_addr_list(hw, mta_list, i);
3437 kfree(mta_list);
3438
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003439 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003440}
3441
3442/**
3443 * igb_write_uc_addr_list - write unicast addresses to RAR table
3444 * @netdev: network interface device structure
3445 *
3446 * Writes unicast address list to the RAR table.
3447 * Returns: -ENOMEM on failure/insufficient address space
3448 * 0 on no addresses written
3449 * X on writing X addresses to the RAR table
3450 **/
3451static int igb_write_uc_addr_list(struct net_device *netdev)
3452{
3453 struct igb_adapter *adapter = netdev_priv(netdev);
3454 struct e1000_hw *hw = &adapter->hw;
3455 unsigned int vfn = adapter->vfs_allocated_count;
3456 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
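	/* entry 0 holds the PF default MAC and one RAR entry is reserved
	 * per VF, hence the (vfn + 1) adjustment above */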
3457 int count = 0;
3458
3459	/* return -ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003460 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003461 return -ENOMEM;
3462
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003463 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003464 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003465
3466 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003467 if (!rar_entries)
3468 break;
3469 igb_rar_set_qsel(adapter, ha->addr,
3470 rar_entries--,
3471 vfn);
3472 count++;
3473 }
3474 }
3475 /* write the addresses in reverse order to avoid write combining */
3476 for (; rar_entries > 0 ; rar_entries--) {
3477 wr32(E1000_RAH(rar_entries), 0);
3478 wr32(E1000_RAL(rar_entries), 0);
3479 }
3480 wrfl();
3481
3482 return count;
3483}
3484
3485/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003486 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003487 * @netdev: network interface device structure
3488 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003489 * The set_rx_mode entry point is called whenever the unicast or multicast
3490 * address lists or the network interface flags are updated. This routine is
3491 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003492 * promiscuous mode, and all-multi behavior.
3493 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003494static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003495{
3496 struct igb_adapter *adapter = netdev_priv(netdev);
3497 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003498 unsigned int vfn = adapter->vfs_allocated_count;
3499 u32 rctl, vmolr = 0;
3500 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003501
3502 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003503 rctl = rd32(E1000_RCTL);
3504
Alexander Duyck68d480c2009-10-05 06:33:08 +00003505	/* clear the affected bits */
3506 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3507
Patrick McHardy746b9f02008-07-16 20:15:45 -07003508 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003509 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003510 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003511 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003512 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003513 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003514 vmolr |= E1000_VMOLR_MPME;
3515 } else {
3516 /*
3517	 * Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003518 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003519 * that we can at least receive multicast traffic
3520 */
3521 count = igb_write_mc_addr_list(netdev);
3522 if (count < 0) {
3523 rctl |= E1000_RCTL_MPE;
3524 vmolr |= E1000_VMOLR_MPME;
3525 } else if (count) {
3526 vmolr |= E1000_VMOLR_ROMPE;
3527 }
3528 }
3529 /*
3530	 * Write addresses to available RAR registers; if there is not
3531 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003532 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003533 */
3534 count = igb_write_uc_addr_list(netdev);
3535 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003536 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003537 vmolr |= E1000_VMOLR_ROPE;
3538 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003539 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003540 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003541 wr32(E1000_RCTL, rctl);
3542
Alexander Duyck68d480c2009-10-05 06:33:08 +00003543 /*
3544 * In order to support SR-IOV and eventually VMDq it is necessary to set
3545 * the VMOLR to enable the appropriate modes. Without this workaround
3546 * we will have issues with VLAN tag stripping not being done for frames
3547 * that are only arriving because we are the default pool
3548 */
3549 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003550 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003551
Alexander Duyck68d480c2009-10-05 06:33:08 +00003552 vmolr |= rd32(E1000_VMOLR(vfn)) &
3553 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3554 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003555 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003556}
3557
Greg Rose13800462010-11-06 02:08:26 +00003558static void igb_check_wvbr(struct igb_adapter *adapter)
3559{
3560 struct e1000_hw *hw = &adapter->hw;
3561 u32 wvbr = 0;
3562
3563 switch (hw->mac.type) {
3564 case e1000_82576:
3565 case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
3569 default:
3570 break;
3571 }
3572
3573 adapter->wvbr |= wvbr;
3574}
3575
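/* WVBR reports spoof events in two bit fields: bit j flags VF j's first
 * queue and bit (j + IGB_STAGGERED_QUEUE_OFFSET) its second */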
3576#define IGB_STAGGERED_QUEUE_OFFSET 8
3577
3578static void igb_spoof_check(struct igb_adapter *adapter)
3579{
3580 int j;
3581
3582 if (!adapter->wvbr)
3583 return;
3584
3585	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3586 if (adapter->wvbr & (1 << j) ||
3587 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3588 dev_warn(&adapter->pdev->dev,
3589 "Spoof event(s) detected on VF %d\n", j);
3590 adapter->wvbr &=
3591 ~((1 << j) |
3592 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3593 }
3594 }
3595}
3596
Auke Kok9d5c8242008-01-24 02:22:38 -08003597/* Need to wait a few seconds after link up to get diagnostic information from
3598 * the PHY */
3599static void igb_update_phy_info(unsigned long data)
3600{
3601 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003602 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003603}
3604
3605/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003606 * igb_has_link - check shared code for link and determine up/down
3607 * @adapter: pointer to driver private info
3608 **/
Nick Nunley31455352010-02-17 01:01:21 +00003609bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003610{
3611 struct e1000_hw *hw = &adapter->hw;
3612 bool link_active = false;
3613 s32 ret_val = 0;
3614
3615	/* get_link_status is set on LSC (link status) interrupt or
3616	 * rx sequence error interrupt.  get_link_status will stay
3617	 * set until e1000_check_for_link establishes link; this
3618	 * applies to copper adapters ONLY
3619 */
3620 switch (hw->phy.media_type) {
3621 case e1000_media_type_copper:
3622 if (hw->mac.get_link_status) {
3623 ret_val = hw->mac.ops.check_for_link(hw);
3624 link_active = !hw->mac.get_link_status;
3625 } else {
3626 link_active = true;
3627 }
3628 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003629 case e1000_media_type_internal_serdes:
3630 ret_val = hw->mac.ops.check_for_link(hw);
3631 link_active = hw->mac.serdes_has_link;
3632 break;
3633 default:
3634 case e1000_media_type_unknown:
3635 break;
3636 }
3637
3638 return link_active;
3639}
3640
Stefan Assmann563988d2011-04-05 04:27:15 +00003641static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3642{
3643 bool ret = false;
3644 u32 ctrl_ext, thstat;
3645
3646 /* check for thermal sensor event on i350, copper only */
3647 if (hw->mac.type == e1000_i350) {
3648 thstat = rd32(E1000_THSTAT);
3649 ctrl_ext = rd32(E1000_CTRL_EXT);
3650
3651 if ((hw->phy.media_type == e1000_media_type_copper) &&
3652 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3653 ret = !!(thstat & event);
3654 }
3655 }
3656
3657 return ret;
3658}
3659
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003660/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003661 * igb_watchdog - Timer Call-back
3662 * @data: pointer to adapter cast into an unsigned long
3663 **/
3664static void igb_watchdog(unsigned long data)
3665{
3666 struct igb_adapter *adapter = (struct igb_adapter *)data;
3667 /* Do the rest outside of interrupt context */
3668 schedule_work(&adapter->watchdog_task);
3669}
3670
3671static void igb_watchdog_task(struct work_struct *work)
3672{
3673 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003674 struct igb_adapter,
3675 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003676 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003677 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003678 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003679 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003680
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003681 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003682 if (link) {
3683 if (!netif_carrier_ok(netdev)) {
3684 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003685 hw->mac.ops.get_speed_and_duplex(hw,
3686 &adapter->link_speed,
3687 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003688
3689 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003690			/* Link status messages must follow this format */
3691 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003692 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003693 netdev->name,
3694 adapter->link_speed,
3695 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003696 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003697 ((ctrl & E1000_CTRL_TFCE) &&
3698 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3699 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3700 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003701
Stefan Assmann563988d2011-04-05 04:27:15 +00003702 /* check for thermal sensor event */
3703 if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
3704 printk(KERN_INFO "igb: %s The network adapter "
3705 "link speed was downshifted "
3706 "because it overheated.\n",
3707 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003708 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003709
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003710 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003711 adapter->tx_timeout_factor = 1;
3712 switch (adapter->link_speed) {
3713 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003714 adapter->tx_timeout_factor = 14;
3715 break;
3716 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003717 /* maybe add some timeout factor ? */
3718 break;
3719 }
3720
3721 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003722
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003723 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003724 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003725
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003726 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003727 if (!test_bit(__IGB_DOWN, &adapter->state))
3728 mod_timer(&adapter->phy_info_timer,
3729 round_jiffies(jiffies + 2 * HZ));
3730 }
3731 } else {
3732 if (netif_carrier_ok(netdev)) {
3733 adapter->link_speed = 0;
3734 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003735
3736 /* check for thermal sensor event */
3737 if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
3738 printk(KERN_ERR "igb: %s The network adapter "
3739 "was stopped because it "
3740 "overheated.\n",
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003741 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003742 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003743
Alexander Duyck527d47c2008-11-27 00:21:39 -08003744			/* Link status messages must follow this format */
3745 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3746 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003747 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003748
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003749 igb_ping_all_vfs(adapter);
3750
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003751 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003752 if (!test_bit(__IGB_DOWN, &adapter->state))
3753 mod_timer(&adapter->phy_info_timer,
3754 round_jiffies(jiffies + 2 * HZ));
3755 }
3756 }
3757
Eric Dumazet12dcd862010-10-15 17:27:10 +00003758 spin_lock(&adapter->stats64_lock);
3759 igb_update_stats(adapter, &adapter->stats64);
3760 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003761
Alexander Duyckdbabb062009-11-12 18:38:16 +00003762 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003763 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003764 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003765 /* We've lost link, so the controller stops DMA,
3766 * but we've got queued Tx work that's never going
3767 * to get done, so reset controller to flush Tx.
3768 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003769 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3770 adapter->tx_timeout_count++;
3771 schedule_work(&adapter->reset_task);
3772 /* return immediately since reset is imminent */
3773 return;
3774 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003775 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003776
Alexander Duyckdbabb062009-11-12 18:38:16 +00003777 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00003778 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00003779 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003780
Auke Kok9d5c8242008-01-24 02:22:38 -08003781 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003782 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003783 u32 eics = 0;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00003784 for (i = 0; i < adapter->num_q_vectors; i++)
3785 eics |= adapter->q_vector[i]->eims_value;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003786 wr32(E1000_EICS, eics);
3787 } else {
3788 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3789 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003790
Greg Rose13800462010-11-06 02:08:26 +00003791 igb_spoof_check(adapter);
3792
Auke Kok9d5c8242008-01-24 02:22:38 -08003793 /* Reset the timer */
3794 if (!test_bit(__IGB_DOWN, &adapter->state))
3795 mod_timer(&adapter->watchdog_timer,
3796 round_jiffies(jiffies + 2 * HZ));
3797}
3798
3799enum latency_range {
3800 lowest_latency = 0,
3801 low_latency = 1,
3802 bulk_latency = 2,
3803 latency_invalid = 255
3804};
3805
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003806/**
3807 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3808 *
3809 * Stores a new ITR value based strictly on packet size.  This
3810 * algorithm is less sophisticated than that used in igb_update_itr,
3811 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003812 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003813 * were determined based on theoretical maximum wire speed and testing
3814 * data, in order to minimize response time while increasing bulk
3815 * throughput.
3816 * This functionality is controlled by the InterruptThrottleRate module
3817 * parameter (see igb_param.c)
3818 * NOTE: This function is called only when operating in a multiqueue
3819 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003820 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003821 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003822static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003823{
Alexander Duyck047e0032009-10-27 15:49:27 +00003824 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003825 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003826 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003827 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003828
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003829	/* For non-gigabit speeds, just fix the interrupt rate at 4000
3830	 * ints/sec (IGB_4K_ITR).
3831 */
3832 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003833 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003834 goto set_itr_val;
3835 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003836
Alexander Duyck0ba82992011-08-26 07:45:47 +00003837 packets = q_vector->rx.total_packets;
3838 if (packets)
3839 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003840
Alexander Duyck0ba82992011-08-26 07:45:47 +00003841 packets = q_vector->tx.total_packets;
3842 if (packets)
3843 avg_wire_size = max_t(u32, avg_wire_size,
3844 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003845
3846 /* if avg_wire_size isn't set no work was done */
3847 if (!avg_wire_size)
3848 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003849
3850 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3851 avg_wire_size += 24;
3852
3853 /* Don't starve jumbo frames */
3854 avg_wire_size = min(avg_wire_size, 3000);
3855
3856 /* Give a little boost to mid-size frames */
3857 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3858 new_val = avg_wire_size / 3;
3859 else
3860 new_val = avg_wire_size / 2;
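	/* e.g. 60-byte frames give (60 + 24) / 2 = 42, while 600-byte
	 * frames get the mid-size boost: (600 + 24) / 3 = 208 */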
3861
Alexander Duyck0ba82992011-08-26 07:45:47 +00003862 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3863 if (new_val < IGB_20K_ITR &&
3864 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3865 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3866 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003867
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003868set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003869 if (new_val != q_vector->itr_val) {
3870 q_vector->itr_val = new_val;
3871 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003872 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003873clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003874 q_vector->rx.total_bytes = 0;
3875 q_vector->rx.total_packets = 0;
3876 q_vector->tx.total_bytes = 0;
3877 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003878}
3879
3880/**
3881 * igb_update_itr - update the dynamic ITR value based on statistics
3882 * Stores a new ITR value based on packet and byte
3883 * counts during the last interrupt.  The advantage of per-interrupt
3884 * computation is faster updates and more accurate ITR for the current
3885 * traffic pattern. Constants in this function were computed
3886 * based on theoretical maximum wire speed and thresholds were set based
3887 * on testing data as well as attempting to minimize response time
3888 * while increasing bulk throughput.
3889 * this functionality is controlled by the InterruptThrottleRate module
3890 * parameter (see igb_param.c)
3891 * NOTE: These calculations are only valid when operating in a single-
3892 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003893 * @q_vector: pointer to q_vector
3894 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003895 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003896static void igb_update_itr(struct igb_q_vector *q_vector,
3897 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003898{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003899 unsigned int packets = ring_container->total_packets;
3900 unsigned int bytes = ring_container->total_bytes;
3901 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003902
Alexander Duyck0ba82992011-08-26 07:45:47 +00003903 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003904 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003905 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003906
Alexander Duyck0ba82992011-08-26 07:45:47 +00003907 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003908 case lowest_latency:
3909 /* handle TSO and jumbo frames */
3910 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003911 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003912 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003913 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003914 break;
3915 case low_latency: /* 50 usec aka 20000 ints/s */
3916 if (bytes > 10000) {
3917 /* this if handles the TSO accounting */
3918 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003919 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003920 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003921 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003922 } else if ((packets > 35)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003923 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003924 }
3925 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003926 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003927 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003928 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003929 }
3930 break;
3931 case bulk_latency: /* 250 usec aka 4000 ints/s */
3932 if (bytes > 25000) {
3933 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003934 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003935 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003936 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003937 }
3938 break;
3939 }
3940
Alexander Duyck0ba82992011-08-26 07:45:47 +00003941 /* clear work counters since we have the values we need */
3942 ring_container->total_bytes = 0;
3943 ring_container->total_packets = 0;
3944
3945 /* write updated itr to ring container */
3946 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003947}
3948
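/*
 * igb_set_itr() below folds the per-ring classifications produced by
 * igb_update_itr() into a single EITR value; per the NOTE above, this
 * path is only meaningful in single-queue operation.
 */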
static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) :
			  new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
		     u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

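	/* Note: the context descriptor occupies a ring slot but carries no
	 * packet data; it only programs offload parameters for the data
	 * descriptors that follow (hence the "+1 desc for context
	 * descriptor" budget in igb_xmit_frame_ring()).
	 */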
	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

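	/* A context descriptor is skipped only when there is neither
	 * checksum offload nor a VLAN tag to program; a tagged frame still
	 * needs one because the tag travels in vlan_macip_lens.
	 */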
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}

static __le32 igb_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
				      E1000_ADVTXD_DCMD_IFCS |
				      E1000_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);

	/* set timestamp bit if present */
	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);

	/* set segmentation bits for TSO */
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring if any offload is enabled */
	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert IPv4 checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	}

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

/*
 * The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
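/*
 * For example, a 48K buffer is emitted as a 32K descriptor followed by a
 * 16K descriptor by the inner loop of igb_tx_map() below.
 */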

static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer_info;
	union e1000_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	__le32 cmd_type;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	first->length = size;
	first->dma = dma;
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.olinfo_status = 0;
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = size;
		tx_buffer_info->dma = dma;

		tx_desc->read.olinfo_status = 0;
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
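
/*
 * The two-stage check keeps the transmit fast path cheap: the inline
 * wrapper above only reads the ring counters, and the memory barrier in
 * __igb_maybe_stop_tx() is paid only when the ring is actually low.
 */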

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}
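	/* Illustrative budget: an skb with 3 page fragments asks the check
	 * above for 3 + 4 = 7 free descriptors -- head data, context
	 * descriptor, a 2-descriptor gap, plus one per fragment.
	 */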

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	igb_tx_map(tx_ring, first, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;

out_drop:
	igb_unmap_and_free_tx_resource(tx_ring, first);

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
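		/* skb_padto() zeroes the padding but leaves skb->len alone */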
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
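	/* e.g. the default 1500-byte MTU yields a 1522-byte max frame:
	 * 14-byte Ethernet header + 4-byte FCS + 4-byte VLAN tag */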

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 * @net_stats: rtnl_link_stats64 structure to fill in
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

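		/* loop until a consistent snapshot is read; the seqcount
		 * retries if a writer updated the stats concurrently */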
		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();
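	/* get_cpu() disables preemption, so the CPU id stays valid until
	 * the matching put_cpu() below */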

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pvfdev;
	unsigned int device_id;
	u16 thisvf_devfn;

	random_ether_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
			       (pdev->devfn & 1);
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
			       (pdev->devfn & 3);
		break;
	default:
		device_id = 0;
		thisvf_devfn = 0;
		break;
	}

	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == thisvf_devfn)
			break;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	if (pvfdev)
		adapter->vf_data[vf].vfdev = pvfdev;
	else
		dev_err(&pdev->dev,
			"Couldn't find pci dev ptr for VF %4.4x\n",
			thisvf_devfn);
	return pvfdev != NULL;
}

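/*
 * SR-IOV routing-ID layout assumed by the helpers above and below: VFs of
 * an 82576/i350 PF appear starting at the PF's devfn + 0x80 and advance by
 * the per-device VF stride (2 for 82576, 4 for i350).  For a PF at devfn 0
 * on an 82576, VF0 is therefore at devfn 0x80, VF1 at 0x82, and so on.
 */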
static int igb_find_enabled_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pvfdev;
	u16 vf_devfn = 0;
	u16 vf_stride;
	unsigned int device_id;
	int vfs_found = 0;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		vf_stride = 2;
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		vf_stride = 4;
		break;
	default:
		device_id = 0;
		vf_stride = 0;
		break;
	}

	vf_devfn = pdev->devfn + 0x80;
	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == vf_devfn)
			vfs_found++;
		vf_devfn += vf_stride;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	return vfs_found;
}

static int igb_check_vf_assignment(struct igb_adapter *adapter)
{
	int i;
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (adapter->vf_data[i].vfdev) {
			if (adapter->vf_data[i].vfdev->dev_flags &
			    PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}
	}
	return false;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN id before clearing the entry so the
			 * right vfta bit is cleared (the original code zeroed
			 * reg first, which always yielded vid 0) */
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
5174
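/**
 * igb_vlvf_set - add or remove a VF from a shared VLAN filter pool
 * @adapter: board private structure
 * @vid: VLAN id to filter on
 * @add: true to join the pool for @vid, false to leave it
 * @vf: VF (or PF) pool index
 *
 * VLVF entries are shared: one entry per VLAN id, with a pool-select
 * bit per function. Adding allocates a free entry if needed and sets
 * the VFTA bit; removing frees the entry once its pool empties. For
 * true VFs the RLPML limit in VMOLR is also grown or shrunk by 4 bytes
 * around the first/last VLAN so a tagged frame still fits.
 **/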
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled. Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

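/**
 * igb_set_vmvir - program the VF's default VLAN insertion register
 * @adapter: board private structure
 * @vid: VLAN id (with QoS bits) to insert on transmit, 0 to disable
 * @vf: VF index
 *
 * A non-zero @vid arms VMVIR with E1000_VMVIR_VLANA_DEFAULT so the
 * hardware tags every frame the VF sends; zero clears the register.
 **/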
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

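/**
 * igb_ndo_set_vf_vlan - ndo handler for "ip link set ... vf N vlan V qos Q"
 * @netdev: PF network interface
 * @vf: VF index
 * @vlan: administratively assigned VLAN id (0 clears the assignment)
 * @qos: 802.1p priority to insert with the tag
 *
 * Installs (or tears down) a PF-enforced port VLAN on the VF: the id is
 * added to the shared VLVF pool, default insertion is armed via VMVIR,
 * and the assignment is remembered so a VF reset restores it.
 **/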
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

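/**
 * igb_vf_reset - restore a VF's offload state to post-reset defaults
 * @adapter: board private structure
 * @vf: VF index
 *
 * Clears every flag except IGB_VF_FLAG_PF_SET_MAC, resets VMOLR and the
 * VLAN filters, re-applies any PF-administered port VLAN and empties
 * the cached multicast hash list before rewriting the Rx filters.
 **/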
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device, then restore any PF-assigned port vlan */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

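/**
 * igb_vf_reset_msg - handle an explicit E1000_VF_RESET mailbox request
 * @adapter: board private structure
 * @vf: VF index
 *
 * Performs the common reset work, programs the VF's MAC into a
 * dedicated RAR slot, re-enables its Tx/Rx queues via VFTE/VFRE, marks
 * the VF clear-to-send and replies with an ACK carrying the MAC.
 **/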
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

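/**
 * igb_rcv_msg_from_vf - dispatch one mailbox message from a VF
 * @adapter: board private structure
 * @vf: VF index the message arrived on
 *
 * The low 16 bits of word 0 select the request (reset, MAC, promisc,
 * multicast, LPE, VLAN). Until a VF has completed a reset handshake it
 * is not clear-to-send and anything but E1000_VF_RESET is NACKed;
 * requests that collide with PF-administered settings are refused too.
 **/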
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS status and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\n"
				 "Reload the VF driver to resume operations\n",
				 vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\n"
				 "Reload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

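/**
 * igb_msg_task - service all outstanding VF mailbox events
 * @adapter: board private structure
 *
 * For every allocated VF, handle pending reset requests, incoming
 * messages and ACKs signalled through the PF-VF mailbox.
 **/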
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

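/**
 * igb_ring_irq_enable - update ITR and re-arm interrupts for a q_vector
 * @q_vector: vector to re-enable
 *
 * Re-tunes the interrupt throttle rate when dynamic ITR is configured,
 * then unmasks the vector: via EIMS when MSI-X is in use, otherwise by
 * re-enabling the shared legacy/MSI interrupt.
 **/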
void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit polling mode and re-enable the interrupt */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type >= e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_tx_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
			    struct igb_tx_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	for (; budget; budget--) {
		eop_desc = tx_buffer->next_to_watch;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* retrieve hardware timestamp */
		igb_tx_hwtstamp(q_vector, tx_buffer);

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);
		tx_buffer->skb = NULL;

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 tx_buffer->dma,
				 tx_buffer->length,
				 DMA_TO_DEVICE);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer->dma = 0;

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (tx_buffer->dma) {
				dma_unmap_page(tx_ring->dev,
					       tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			}
		}

		/* clear last DMA location */
		tx_buffer->dma = 0;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}
	}

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		eop_desc = tx_buffer->next_to_watch;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (eop_desc &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				eop_desc,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

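/**
 * igb_rx_checksum - set the skb checksum status from Rx descriptor bits
 * @ring: Rx ring the descriptor came from
 * @rx_desc: advanced Rx descriptor written back by hardware
 * @skb: packet being processed
 *
 * Leaves the skb at CHECKSUM_NONE when offload is disabled, the IXSM
 * bit is set, or the hardware flagged an L4/IP checksum error; the
 * known short-frame SCTP errata case is passed to the stack without
 * bumping the error counter. Otherwise CHECKSUM_UNNECESSARY is set.
 **/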
static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
				       E1000_RXDADV_STAT_TS))
		return;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static void igb_rx_vlan(struct igb_ring *ring,
			union e1000_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;
		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, vid);
	}
}

static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger. In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IGB_RX_HDR_LEN)
		hlen = IGB_RX_HDR_LEN;
	return hlen;
}

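/**
 * igb_clean_rx_irq - clean completed descriptors from the Rx ring
 * @q_vector: vector owning the Rx ring
 * @budget: maximum number of packets to process
 *
 * The ring uses header/page split: the header lands in a small DMA
 * buffer attached to the skb while payload spills into a half-page
 * fragment. Completed packets are unmapped, stitched together across
 * EOP boundaries, decorated (timestamp, hash, checksum, VLAN) and
 * handed to GRO; buffers are recycled back to hardware in batches.
 * Returns true if the budget was not exhausted.
 **/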
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	union e1000_adv_rx_desc *rx_desc;
	const int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	u16 i = rx_ring->next_to_clean;

	rx_desc = IGB_RX_DESC(rx_ring, i);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		struct sk_buff *skb = buffer_info->skb;
		union e1000_adv_rx_desc *next_rxd;

		buffer_info->skb = NULL;
		prefetch(skb->data);

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IGB_RX_DESC(rx_ring, i);
		prefetch(next_rxd);

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		if (!skb_is_nonlinear(skb)) {
			__skb_put(skb, igb_get_hlen(rx_desc));
			dma_unmap_single(rx_ring->dev, buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (rx_desc->wb.upper.length) {
			u16 length = le16_to_cpu(rx_desc->wb.upper.length);

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}

		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
			struct igb_rx_buffer *next_buffer;
			next_buffer = &rx_ring->rx_buffer_info[i];
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (igb_test_staterr(rx_desc,
				     E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, rx_desc, skb);
		igb_rx_hash(rx_ring, rx_desc, skb);
		igb_rx_checksum(rx_ring, rx_desc, skb);
		igb_rx_vlan(rx_ring, rx_desc, skb);

		total_bytes += skb->len;
		total_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		napi_gro_receive(&q_vector->napi, skb);

		budget--;
next_desc:
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return !!budget;
}

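/**
 * igb_alloc_mapped_skb - ensure an Rx entry has a DMA-mapped header skb
 * @rx_ring: ring the buffer belongs to
 * @bi: buffer_info slot to populate
 *
 * Allocates a small IGB_RX_HDR_LEN skb if the slot has none and maps it
 * for DMA; on failure it bumps alloc_failed and returns false so the
 * caller can retry later. Its companion below does the same for the
 * half-page payload buffer.
 **/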
static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
				 struct igb_rx_buffer *bi)
{
	struct sk_buff *skb = bi->skb;
	dma_addr_t dma = bi->dma;

	if (dma)
		return true;

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IGB_RX_HDR_LEN);
		bi->skb = skb;
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}

		/* initialize skb for ring */
		skb_record_rx_queue(skb, rx_ring->queue_index);
	}

	dma = dma_map_single(rx_ring->dev, skb->data,
			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	return true;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t page_dma = bi->page_dma;
	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);

	if (page_dma)
		return true;

	if (!page) {
		page = netdev_alloc_page(rx_ring->netdev);
		bi->page = page;
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}
	}

	page_dma = dma_map_page(rx_ring->dev, page,
				page_offset, PAGE_SIZE / 2,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, page_dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->page_dma = page_dma;
	bi->page_offset = page_offset;
	return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to allocate buffers for
 * @cleaned_count: number of descriptors to refill
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info. */
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;
	}

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - handle SIOCGMIIPHY/SIOCGMIIREG MII ioctls
 * @netdev: network interface device structure
 * @ifr: ioctl request carrying the mii_ioctl_data
 * @cmd: ioctl command
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

6295/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006296 * igb_hwtstamp_ioctl - control hardware time stamping
6297 * @netdev:
6298 * @ifreq:
6299 * @cmd:
6300 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006301 * Outgoing time stamping can be enabled and disabled. Play nice and
6302 * disable it when requested, although it shouldn't case any overhead
6303 * when no packet needs it. At most one packet in the queue may be
6304 * marked for time stamping, otherwise it would be impossible to tell
6305 * for sure to which packet the hardware time stamp belongs.
6306 *
6307 * Incoming time stamping has to be configured via the hardware
6308 * filters. Not all combinations are supported, in particular event
6309 * type has to be specified. Matching the kind of event packet is
6310 * not supported, with the exception of "all V2 events regardless of
6311 * level 2 or 4".
6312 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006313 **/
6314static int igb_hwtstamp_ioctl(struct net_device *netdev,
6315 struct ifreq *ifr, int cmd)
6316{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006317 struct igb_adapter *adapter = netdev_priv(netdev);
6318 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006319 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006320 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6321 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006322 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006323 bool is_l4 = false;
6324 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006325 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006326
6327 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6328 return -EFAULT;
6329
6330 /* reserved for future extensions */
6331 if (config.flags)
6332 return -EINVAL;
6333
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006334 switch (config.tx_type) {
6335 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006336 tsync_tx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006337 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006338 break;
6339 default:
6340 return -ERANGE;
6341 }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tsync_rx_ctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_ALL:
                /*
                 * register TSYNCRXCFG must be set, therefore it is not
                 * possible to time stamp both Sync and Delay_Req messages
                 * => fall back to time stamping all packets
                 */
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                is_l2 = true;
                is_l4 = true;
                break;
        default:
                return -ERANGE;
        }

        if (hw->mac.type == e1000_82575) {
                if (tsync_rx_ctl | tsync_tx_ctl)
                        return -EINVAL;
                return 0;
        }

        /*
         * Per-packet timestamping only works if all packets are
         * timestamped, so enable timestamping in all packets as
         * long as one rx filter was configured.
         */
        if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
                tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
        }

        /* enable/disable TX */
        regval = rd32(E1000_TSYNCTXCTL);
        regval &= ~E1000_TSYNCTXCTL_ENABLED;
        regval |= tsync_tx_ctl;
        wr32(E1000_TSYNCTXCTL, regval);

        /* enable/disable RX */
        regval = rd32(E1000_TSYNCRXCTL);
        regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
        regval |= tsync_rx_ctl;
        wr32(E1000_TSYNCRXCTL, regval);

        /* define which PTP packets are time stamped */
        wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

        /* define ethertype filter for timestamped packets */
        if (is_l2)
                wr32(E1000_ETQF(3),
                     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
                      E1000_ETQF_1588 | /* enable timestamping */
                      ETH_P_1588));     /* 1588 eth protocol type */
        else
                wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
        /* L4 Queue Filter[3]: filter by destination port and protocol */
        if (is_l4) {
                u32 ftqf = (IPPROTO_UDP /* UDP */
                        | E1000_FTQF_VF_BP /* VF not compared */
                        | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
                        | E1000_FTQF_MASK); /* mask all inputs */
                ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

                wr32(E1000_IMIR(3), htons(PTP_PORT));
                wr32(E1000_IMIREXT(3),
                     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
                if (hw->mac.type == e1000_82576) {
                        /* enable source port check */
                        wr32(E1000_SPQF(3), htons(PTP_PORT));
                        ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
                }
                wr32(E1000_FTQF(3), ftqf);
        } else {
                wr32(E1000_FTQF(3), E1000_FTQF_MASK);
        }
        wrfl();

        adapter->hwtstamp_config = config;

        /* clear TX/RX time stamp registers, just to be sure */
        regval = rd32(E1000_TXSTMPH);
        regval = rd32(E1000_RXSTMPH);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

/**
 * igb_ioctl - entry point for interface ioctl calls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command number
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
        case SIOCSHWTSTAMP:
                return igb_hwtstamp_ioctl(netdev, ifr, cmd);
        default:
                return -EOPNOTSUPP;
        }
}

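/**
 * igb_read_pcie_cap_reg - read a word from the PCIe capability block
 * @hw: pointer to the HW structure
 * @reg: offset of the register within the PCIe capability structure
 * @value: pointer to store the word read from config space
 **/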
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = adapter->pdev->pcie_cap;
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_read_config_word(adapter->pdev, cap_offset + reg, value);

        return 0;
}

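/**
 * igb_write_pcie_cap_reg - write a word into the PCIe capability block
 * @hw: pointer to the HW structure
 * @reg: offset of the register within the PCIe capability structure
 * @value: pointer to the word to be written to config space
 **/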
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = adapter->pdev->pcie_cap;
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

        return 0;
}

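/**
 * igb_vlan_mode - enable or disable hardware VLAN tag insert/strip
 * @netdev: network interface device structure
 * @features: netdev features; NETIF_F_HW_VLAN_RX selects stripping
 **/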
static void igb_vlan_mode(struct net_device *netdev, u32 features)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl;
        bool enable = !!(features & NETIF_F_HW_VLAN_RX);

        if (enable) {
                /* enable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl |= E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);

                /* Disable CFI check */
                rctl = rd32(E1000_RCTL);
                rctl &= ~E1000_RCTL_CFIEN;
                wr32(E1000_RCTL, rctl);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl &= ~E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
        }

        igb_rlpml_set(adapter);
}

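/**
 * igb_vlan_rx_add_vid - add a filter entry for the given VLAN id
 * @netdev: network interface device structure
 * @vid: VLAN id to be added to the VLVF/VFTA filter tables
 **/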
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;

        /* attempt to add filter to vlvf array */
        igb_vlvf_set(adapter, vid, true, pf_id);

        /* add the filter since PF can receive vlans w/o entry in vlvf */
        igb_vfta_set(hw, vid, true);

        set_bit(vid, adapter->active_vlans);
}

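/**
 * igb_vlan_rx_kill_vid - remove the filter entry for the given VLAN id
 * @netdev: network interface device structure
 * @vid: VLAN id to be removed from the VLVF/VFTA filter tables
 **/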
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;
        s32 err;

        /* remove vlan from VLVF table array */
        err = igb_vlvf_set(adapter, vid, false, pf_id);

        /* if vid was not present in VLVF just remove it from table */
        if (err)
                igb_vfta_set(hw, vid, false);

        clear_bit(vid, adapter->active_vlans);
}

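/**
 * igb_restore_vlan - re-apply the VLAN offload mode and filter table
 * @adapter: board private structure
 **/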
static void igb_restore_vlan(struct igb_adapter *adapter)
{
        u16 vid;

        igb_vlan_mode(adapter->netdev, adapter->netdev->features);

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                igb_vlan_rx_add_vid(adapter->netdev, vid);
}

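/**
 * igb_set_spd_dplx - force a specific speed/duplex setting
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10/100/1000)
 * @dplx: requested duplex (DUPLEX_HALF or DUPLEX_FULL)
 **/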
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;

        mac->autoneg = 0;

        /* Make sure dplx is at most 1 bit and lsb of speed is not set
         * for the switch() below to work */
        if ((spd & 1) || (dplx & ~1))
                goto err_inval;

        /* Fiber NICs only allow 1000 Mbps full duplex */
        if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
            (spd != SPEED_1000 || dplx != DUPLEX_FULL))
                goto err_inval;

        switch (spd + dplx) {
        case SPEED_10 + DUPLEX_HALF:
                mac->forced_speed_duplex = ADVERTISE_10_HALF;
                break;
        case SPEED_10 + DUPLEX_FULL:
                mac->forced_speed_duplex = ADVERTISE_10_FULL;
                break;
        case SPEED_100 + DUPLEX_HALF:
                mac->forced_speed_duplex = ADVERTISE_100_HALF;
                break;
        case SPEED_100 + DUPLEX_FULL:
                mac->forced_speed_duplex = ADVERTISE_100_FULL;
                break;
        case SPEED_1000 + DUPLEX_FULL:
                mac->autoneg = 1;
                adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
                break;
        case SPEED_1000 + DUPLEX_HALF: /* not supported */
        default:
                goto err_inval;
        }
        return 0;

err_inval:
        dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
        return -EINVAL;
}

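/**
 * __igb_shutdown - bring the interface down and configure wake-up
 * @pdev: PCI device information struct
 * @enable_wake: set true when WoL filters or manageability require power
 **/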
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = adapter->wol;
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev))
                igb_close(netdev);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
#endif

        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;

        if (wufc) {
                igb_setup_rctl(adapter);
                igb_set_rx_mode(netdev);

                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
                        rctl = rd32(E1000_RCTL);
                        rctl |= E1000_RCTL_MPE;
                        wr32(E1000_RCTL, rctl);
                }

                ctrl = rd32(E1000_CTRL);
                /* advertise wake from D3Cold */
                #define E1000_CTRL_ADVD3WUC 0x00100000
                /* phy power management enable */
                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);

                /* Allow time for pending master requests to run */
                igb_disable_pcie_master(hw);

                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
        } else {
                wr32(E1000_WUC, 0);
                wr32(E1000_WUFC, 0);
        }

        *enable_wake = wufc || adapter->en_mng_pt;
        if (!*enable_wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);

        /* Release control of h/w to f/w. If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        pci_disable_device(pdev);

        return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int retval;
        bool wake;

        retval = __igb_shutdown(pdev, &wake);
        if (retval)
                return retval;

        if (wake) {
                pci_prepare_to_sleep(pdev);
        } else {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }

        return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "igb: Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        wr32(E1000_WUS, ~0);

        if (netif_running(netdev)) {
                err = igb_open(netdev);
                if (err)
                        return err;
        }

        netif_device_attach(netdev);

        return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
        bool wake;

        __igb_shutdown(pdev, &wake);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, wake);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct igb_q_vector *q_vector;
        int i;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (adapter->msix_entries)
                        wr32(E1000_EIMC, q_vector->eims_value);
                else
                        igb_irq_disable(adapter);
                napi_schedule(&q_vector->napi);
        }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev))
                igb_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        pci_ers_result_t result;
        int err;

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);

                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);

                igb_reset(adapter);
                wr32(E1000_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
                        "failed 0x%0x\n", err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (igb_up(adapter)) {
                        dev_err(&pdev->dev, "igb_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);
}

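/**
 * igb_rar_set_qsel - write a MAC address into a receive address register
 * @adapter: board private structure
 * @addr: MAC address in network byte order
 * @index: receive address register to program
 * @qsel: pool/queue selector stored in the high register bits
 **/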
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
        u32 rar_low, rar_high;
        struct e1000_hw *hw = &adapter->hw;

        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

        /* Indicate to hardware the Address is Valid. */
        rar_high |= E1000_RAH_AV;

        if (hw->mac.type == e1000_82575)
                rar_high |= E1000_RAH_POOL_1 * qsel;
        else
                rar_high |= E1000_RAH_POOL_1 << qsel;

        wr32(E1000_RAL(index), rar_low);
        wrfl();
        wr32(E1000_RAH(index), rar_high);
        wrfl();
}

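/**
 * igb_set_vf_mac - program the MAC address assigned to a VF
 * @adapter: board private structure
 * @vf: VF number
 * @mac_addr: MAC address to assign to the VF
 **/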
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
        struct e1000_hw *hw = &adapter->hw;
        /* VF MAC addresses start at the end of the receive addresses and
         * move towards the first, so a collision should not be possible */
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);

        memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

        igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

        return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
                return -EINVAL;
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
                 " change effective.");
        if (test_bit(__IGB_DOWN, &adapter->state)) {
                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
                         " but the PF device is not up.\n");
                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
                         " attempting to use the VF device.\n");
        }
        return igb_set_vf_mac(adapter, vf, mac);
}

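/**
 * igb_link_mbps - convert an internal link speed constant to Mbps
 * @internal_link_speed: SPEED_100 or SPEED_1000; anything else yields 0
 **/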
static int igb_link_mbps(int internal_link_speed)
{
        switch (internal_link_speed) {
        case SPEED_100:
                return 100;
        case SPEED_1000:
                return 1000;
        default:
                return 0;
        }
}

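/**
 * igb_set_vf_rate_limit - program the TX rate limiter for one VF
 * @hw: pointer to the HW structure
 * @vf: VF number (VF X uses TX queue X)
 * @tx_rate: rate limit in Mbps, 0 disables the limiter
 * @link_speed: current link speed in Mbps
 *
 * The RTTBCNRC rate factor is the fixed-point ratio link_speed/tx_rate:
 * rf_int carries the integer part and rf_dec the fraction scaled by
 * 2^E1000_RTTBCNRC_RF_INT_SHIFT.
 **/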
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
                                  int link_speed)
{
        int rf_dec, rf_int;
        u32 bcnrc_val;

        if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
                rf_int = link_speed / tx_rate;
                rf_dec = (link_speed - (rf_int * tx_rate));
                rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

                bcnrc_val = E1000_RTTBCNRC_RS_ENA;
                bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
                               E1000_RTTBCNRC_RF_INT_MASK);
                bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
        } else {
                bcnrc_val = 0;
        }

        wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
        wr32(E1000_RTTBCNRC, bcnrc_val);
}

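/**
 * igb_check_vf_rate_limit - re-apply VF rate limits after a link change
 * @adapter: board private structure
 **/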
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
        int actual_link_speed, i;
        bool reset_rate = false;

        /* VF TX rate limit was not set or not supported */
        if ((adapter->vf_rate_link_speed == 0) ||
            (adapter->hw.mac.type != e1000_82576))
                return;

        actual_link_speed = igb_link_mbps(adapter->link_speed);
        if (actual_link_speed != adapter->vf_rate_link_speed) {
                reset_rate = true;
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
                         "Link speed has been changed. VF Transmit "
                         "rate is disabled\n");
        }

        for (i = 0; i < adapter->vfs_allocated_count; i++) {
                if (reset_rate)
                        adapter->vf_data[i].tx_rate = 0;

                igb_set_vf_rate_limit(&adapter->hw, i,
                                      adapter->vf_data[i].tx_rate,
                                      actual_link_speed);
        }
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int actual_link_speed;

        if (hw->mac.type != e1000_82576)
                return -EOPNOTSUPP;

        actual_link_speed = igb_link_mbps(adapter->link_speed);
        if ((vf >= adapter->vfs_allocated_count) ||
            (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
            (tx_rate < 0) || (tx_rate > actual_link_speed))
                return -EINVAL;

        adapter->vf_rate_link_speed = actual_link_speed;
        adapter->vf_data[vf].tx_rate = (u16)tx_rate;
        igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

        return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (vf >= adapter->vfs_allocated_count)
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
        ivi->tx_rate = adapter->vf_data[vf].tx_rate;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        return 0;
}

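/**
 * igb_vmm_control - configure VMDq loopback, replication and anti-spoofing
 * @adapter: board private structure
 **/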
static void igb_vmm_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 reg;

        switch (hw->mac.type) {
        case e1000_82575:
        default:
                /* replication is not supported for 82575 */
                return;
        case e1000_82576:
                /* notify HW that the MAC is adding vlan tags */
                reg = rd32(E1000_DTXCTL);
                reg |= E1000_DTXCTL_VLAN_ADDED;
                wr32(E1000_DTXCTL, reg);
                /* fall through */
        case e1000_82580:
                /* enable replication vlan tag stripping */
                reg = rd32(E1000_RPLOLR);
                reg |= E1000_RPLOLR_STRVLAN;
                wr32(E1000_RPLOLR, reg);
                /* fall through */
        case e1000_i350:
                /* none of the above registers are supported by i350 */
                break;
        }

        if (adapter->vfs_allocated_count) {
                igb_vmdq_set_loopback_pf(hw, true);
                igb_vmdq_set_replication_pf(hw, true);
                igb_vmdq_set_anti_spoofing_pf(hw, true,
                                              adapter->vfs_allocated_count);
        } else {
                igb_vmdq_set_loopback_pf(hw, false);
                igb_vmdq_set_replication_pf(hw, false);
        }
}

/* igb_main.c */