blob: 410834deb8d418bfd4ceabb990dc52f92bb0b92d [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070047 { 0 }
48};
49MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one name per bit position, used to print which
 * blocks reported an Unrecoverable Error (UE).
 * NOTE(review): some entries carry trailing spaces; they are part of the
 * printed output, so keep them byte-identical.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: one name per bit position (bits 32..63 of the UE
 * status); trailing "Unknown" entries pad the table to 32 strings.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
Sathya Perla752961a2011-10-24 02:45:03 +0000121/* Is BE in a multi-channel mode */
122static inline bool be_is_mc(struct be_adapter *adapter) {
123 return (adapter->function_mode & FLEX10_MODE ||
124 adapter->function_mode & VNIC_MODE ||
125 adapter->function_mode & UMC_ENABLED);
126}
127
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000131 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
133 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 mem->va = NULL;
135 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136}
137
138static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
139 u16 len, u16 entry_size)
140{
141 struct be_dma_mem *mem = &q->dma_mem;
142
143 memset(q, 0, sizeof(*q));
144 q->len = len;
145 q->entry_size = entry_size;
146 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
148 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000150 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 memset(mem->va, 0, mem->size);
152 return 0;
153}
154
/* Enable or disable the host-interrupt bit in the PCI config-space
 * membar control register. Writes config space only when the current
 * HW state differs from the requested state, and not at all after an
 * EEH (PCI) error, when the device may be inaccessible.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* device registers may be unreachable after an EEH error */
	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
176
/* Ring the RX-queue doorbell: notify HW that 'posted' new RX buffers
 * were placed on ring 'qid'.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* descriptor writes must reach memory before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
186
/* Ring the TX-queue doorbell: notify HW that 'posted' new WRBs were
 * placed on TX ring 'qid'.
 */
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* WRB writes must reach memory before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
196
/* Notify HW of 'num_popped' consumed event-queue entries on EQ 'qid',
 * optionally re-arming the EQ (arm) and/or clearing the interrupt
 * (clear_int). Skipped entirely after an EEH error.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
216
/* Notify HW of 'num_popped' consumed completion-queue entries on CQ
 * 'qid', optionally re-arming the CQ. Skipped after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
232
/* ndo_set_mac_address handler: program a new MAC address.
 * On BE VFs the PF owns the MAC, so only netdev->dev_addr is updated
 * (and only if the requested MAC matches what the PF configured).
 * Otherwise the new MAC is added via a FW pmac_add command and, if an
 * old MAC was active, that one is deleted afterwards.
 * Returns 0 on success or a negative/FW error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* nothing to do if the requested MAC is already in use */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* new MAC is in place; retire the previously active one */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
286
/* Copy the v0 (BE2) FW stats layout into the chip-independent
 * adapter->drv_stats. The FW buffer is little-endian and is converted
 * in place first, so this must run on a freshly DMA'd stats buffer.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan mismatch drops; driver reports the sum */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counts per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
335
/* Copy the v1 (BE3) FW stats layout into the chip-independent
 * adapter->drv_stats. The FW buffer is converted from little-endian in
 * place first. Unlike v0, v1 reports jabber and mismatch drops directly
 * per port.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
380
/* Copy Lancer per-port FW stats into the chip-independent
 * adapter->drv_stats. Many Lancer counters are 64-bit; only the low
 * 32 bits (the *_lo fields) are folded into the 32-bit drv_stats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* same HW counter feeds both fifo-overflow drv stats on Lancer */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419
Sathya Perla09c1c682011-08-22 19:41:53 +0000420static void accumulate_16bit_val(u32 *acc, u16 val)
421{
422#define lo(x) (x & 0xFFFF)
423#define hi(x) (x & 0xFFFF0000)
424 bool wrapped = val < lo(*acc);
425 u32 newacc = hi(*acc) + val;
426
427 if (wrapped)
428 newacc += 65536;
429 ACCESS_ONCE(*acc) = newacc;
430}
431
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432void be_parse_stats(struct be_adapter *adapter)
433{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000434 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
435 struct be_rx_obj *rxo;
436 int i;
437
Selvin Xavier005d5692011-05-16 07:36:35 +0000438 if (adapter->generation == BE_GEN3) {
439 if (lancer_chip(adapter))
440 populate_lancer_stats(adapter);
441 else
442 populate_be3_stats(adapter);
443 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000445 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000446
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000447 if (lancer_chip(adapter))
448 goto done;
449
Sathya Perlaac124ff2011-07-25 19:10:14 +0000450 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000451 for_all_rx_queues(adapter, rxo, i) {
452 /* below erx HW counter can actually wrap around after
453 * 65535. Driver accumulates a 32-bit value
454 */
455 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
456 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
457 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000458done:
459 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000460}
461
/* ndo_get_stats64 handler: sum the per-queue software TX/RX counters
 * (read inside u64_stats fetch loops so 64-bit values are consistent
 * even on 32-bit hosts) and derive the rtnl error counters from the
 * FW stats cached in adapter->drv_stats. Returns the filled 'stats'.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent 64-bit snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
527
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000528void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700529{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 struct net_device *netdev = adapter->netdev;
531
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000532 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000533 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000534 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700535 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000536
537 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
538 netif_carrier_on(netdev);
539 else
540 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541}
542
/* Account one transmit request on txo's software stats: wrb_cnt WRBs,
 * 'copied' bytes, gso_segs packets (1 for non-GSO), and whether the
 * queue was stopped. Updates run inside a u64_stats section so readers
 * on 32-bit hosts see consistent 64-bit values.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
557
558/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000559static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
560 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700562 int cnt = (skb->len > skb->data_len);
563
564 cnt += skb_shinfo(skb)->nr_frags;
565
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700566 /* to account for hdr wrb */
567 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000568 if (lancer_chip(adapter) || !(cnt & 1)) {
569 *dummy = false;
570 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571 /* add a dummy to make it an even num */
572 cnt++;
573 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000574 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
576 return cnt;
577}
578
579static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
580{
581 wrb->frag_pa_hi = upper_32_bits(addr);
582 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
583 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000584 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700585}
586
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000587static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
588 struct sk_buff *skb)
589{
590 u8 vlan_prio;
591 u16 vlan_tag;
592
593 vlan_tag = vlan_tx_tag_get(skb);
594 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
595 /* If vlan priority provided by OS is NOT in available bmap */
596 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
597 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
598 adapter->recommended_prio;
599
600 return vlan_tag;
601}
602
Somnath Kotur93040ae2012-06-26 22:32:10 +0000603static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
604{
605 return vlan_tx_tag_present(skb) || adapter->pvid;
606}
607
/* Build the TX header WRB for an skb: sets CRC, LSO/checksum-offload
 * and VLAN bits as appropriate, plus the total WRB count and frame
 * length. Bit fields are programmed via the AMAP_SET_BITS accessors
 * into the hardware's descriptor layout.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit only exists on pre-Lancer descriptors */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 workaround: explicitly request csum with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
651
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000652static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000653 bool unmap_single)
654{
655 dma_addr_t dma;
656
657 be_dws_le_to_cpu(wrb, sizeof(*wrb));
658
659 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000660 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000661 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000662 dma_unmap_single(dev, dma, wrb->frag_len,
663 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000664 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000665 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000666 }
667}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
/* DMA-map the skb (head + page frags) and post one wrb per mapping onto
 * @txq, preceded by a header wrb describing the whole packet.
 *
 * Returns the number of payload bytes queued, or 0 if any DMA mapping
 * failed (in which case all mappings made so far are undone and the
 * queue head is rewound — the queue is left exactly as it was found).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header wrb; it is filled in last,
	 * once 'copied' (total payload length) is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the payload wrbs start, for rollback on error */
	map_head = txq->head;

	/* Linear (headroom) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One wrb per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length wrb when the caller determined the HW
	 * needs an even wrb count (see wrb_cnt_for_skb) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind the queue and unmap everything mapped so far. Only the
	 * first mapping can be a dma_map_single() one, hence map_single is
	 * cleared after the first iteration. */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
734
Somnath Kotur93040ae2012-06-26 22:32:10 +0000735static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
736 struct sk_buff *skb)
737{
738 u16 vlan_tag = 0;
739
740 skb = skb_share_check(skb, GFP_ATOMIC);
741 if (unlikely(!skb))
742 return skb;
743
744 if (vlan_tx_tag_present(skb)) {
745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
746 __vlan_put_tag(skb, vlan_tag);
747 skb->vlan_tci = 0;
748 }
749
750 return skb;
751}
752
/* ndo_start_xmit handler: applies two HW-bug workarounds, maps the skb
 * into tx wrbs and rings the doorbell. Always returns NETDEV_TX_OK;
 * on failure the skb is dropped (freed), never requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 means a DMA mapping failed; drop the skb below */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* Read gso_segs before the doorbell: once the HW owns the
		 * skb it may be completed and freed at any moment */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* Mapping failed: rewind the head and drop the packet */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
818
819static int be_change_mtu(struct net_device *netdev, int new_mtu)
820{
821 struct be_adapter *adapter = netdev_priv(netdev);
822 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000823 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
824 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825 dev_info(&adapter->pdev->dev,
826 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000827 BE_MIN_MTU,
828 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829 return -EINVAL;
830 }
831 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
832 netdev->mtu, new_mtu);
833 netdev->mtu = new_mtu;
834 return 0;
835}
836
837/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000838 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
839 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 */
Sathya Perla10329df2012-06-05 19:37:18 +0000841static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700842{
Sathya Perla10329df2012-06-05 19:37:18 +0000843 u16 vids[BE_NUM_VLANS_SUPPORTED];
844 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000845 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000846
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000847 /* No need to further configure vids if in promiscuous mode */
848 if (adapter->promiscuous)
849 return 0;
850
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000851 if (adapter->vlans_added > adapter->max_vlans)
852 goto set_vlan_promisc;
853
854 /* Construct VLAN Table to give to HW */
855 for (i = 0; i < VLAN_N_VID; i++)
856 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000857 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000858
859 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000860 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000861
862 /* Set to VLAN promisc mode as setting VLAN filter failed */
863 if (status) {
864 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
865 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
866 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700867 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000868
Sathya Perlab31c50a2009-09-17 10:30:13 -0700869 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000870
871set_vlan_promisc:
872 status = be_cmd_vlan_config(adapter, adapter->if_handle,
873 NULL, 0, 1, 1);
874 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700875}
876
Jiri Pirko8e586132011-12-08 19:52:37 -0500877static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878{
879 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000880 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700881
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000882 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000883 status = -EINVAL;
884 goto ret;
885 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000886
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000887 /* Packets with VID 0 are always received by Lancer by default */
888 if (lancer_chip(adapter) && vid == 0)
889 goto ret;
890
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700891 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000892 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000893 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500894
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000895 if (!status)
896 adapter->vlans_added++;
897 else
898 adapter->vlan_tag[vid] = 0;
899ret:
900 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700901}
902
Jiri Pirko8e586132011-12-08 19:52:37 -0500903static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700904{
905 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000906 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700907
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000908 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000909 status = -EINVAL;
910 goto ret;
911 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000912
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000913 /* Packets with VID 0 are always received by Lancer by default */
914 if (lancer_chip(adapter) && vid == 0)
915 goto ret;
916
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700917 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000918 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000919 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500920
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000921 if (!status)
922 adapter->vlans_added--;
923 else
924 adapter->vlan_tag[vid] = 1;
925ret:
926 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700927}
928
/* ndo_set_rx_mode handler: sync promiscuous, multicast and unicast
 * filtering state to the HW. Called under netif_addr_lock; HW commands
 * here are best-effort (failures degrade to a more permissive mode).
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters that promisc mode skipped */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: delete all secondary MACs, then re-add the
	 * current list (slot 0 always holds the primary MAC) */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many unicast MACs for the HW table: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
990
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Lancer uses the MAC-list interface; BE3 deletes the old pmac entry and
 * adds the new one. The cached vf_cfg->mac_addr is updated on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove the currently-active MAC (if any) before setting
		 * the new one via the MAC list */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by
		 * pmac_add below; the delete appears to be best-effort —
		 * confirm a del failure here is benign */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1030
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001031static int be_get_vf_config(struct net_device *netdev, int vf,
1032 struct ifla_vf_info *vi)
1033{
1034 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001035 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001036
Sathya Perla11ac75e2011-12-13 00:58:50 +00001037 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001038 return -EPERM;
1039
Sathya Perla11ac75e2011-12-13 00:58:50 +00001040 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001041 return -EINVAL;
1042
1043 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001044 vi->tx_rate = vf_cfg->tx_rate;
1045 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001046 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001047 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001048
1049 return 0;
1050}
1051
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001052static int be_set_vf_vlan(struct net_device *netdev,
1053 int vf, u16 vlan, u8 qos)
1054{
1055 struct be_adapter *adapter = netdev_priv(netdev);
1056 int status = 0;
1057
Sathya Perla11ac75e2011-12-13 00:58:50 +00001058 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001059 return -EPERM;
1060
Sathya Perla11ac75e2011-12-13 00:58:50 +00001061 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001062 return -EINVAL;
1063
1064 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001065 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1066 /* If this is new value, program it. Else skip. */
1067 adapter->vf_cfg[vf].vlan_tag = vlan;
1068
1069 status = be_cmd_set_hsw_config(adapter, vlan,
1070 vf + 1, adapter->vf_cfg[vf].if_handle);
1071 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001072 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001073 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001074 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001075 vlan = adapter->vf_cfg[vf].def_vid;
1076 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1077 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001078 }
1079
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001080
1081 if (status)
1082 dev_info(&adapter->pdev->dev,
1083 "VLAN %d config on VF %d failed\n", vlan, vf);
1084 return status;
1085}
1086
Ajit Khapardee1d18732010-07-23 01:52:13 +00001087static int be_set_vf_tx_rate(struct net_device *netdev,
1088 int vf, int rate)
1089{
1090 struct be_adapter *adapter = netdev_priv(netdev);
1091 int status = 0;
1092
Sathya Perla11ac75e2011-12-13 00:58:50 +00001093 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001094 return -EPERM;
1095
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001096 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001097 return -EINVAL;
1098
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001099 if (rate < 100 || rate > 10000) {
1100 dev_err(&adapter->pdev->dev,
1101 "tx rate must be between 100 and 10000 Mbps\n");
1102 return -EINVAL;
1103 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001104
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001105 if (lancer_chip(adapter))
1106 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1107 else
1108 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001109
1110 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001111 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001112 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001113 else
1114 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001115 return status;
1116}
1117
Sathya Perla39f1d942012-05-08 19:41:24 +00001118static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1119{
1120 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001121 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001122 u16 offset, stride;
1123
1124 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001125 if (!pos)
1126 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001127 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1128 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1129
1130 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1131 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001132 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001133 vfs++;
1134 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1135 assigned_vfs++;
1136 }
1137 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1138 }
1139 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1140}
1141
/* Adaptive interrupt coalescing: recompute the EQ delay for @eqo from
 * the observed rx packet rate and program it if it changed. Called
 * periodically (rate is sampled at most once per second).
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just apply the statically configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the rx queues have no rx stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit counter consistently on 32-bit hosts */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec to an EQ delay, clamped to the EQ's limits;
	 * very low rates get no coalescing at all */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW command only when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1190
Sathya Perla3abcded2010-10-03 22:12:27 -07001191static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001192 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001193{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001194 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001195
Sathya Perlaab1594e2011-07-25 19:10:15 +00001196 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001197 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001198 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001200 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001201 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001202 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001203 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001204 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205}
1206
Sathya Perla2e588f82011-03-11 02:49:26 +00001207static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001208{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001209 /* L4 checksum is not reliable for non TCP/UDP packets.
1210 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001211 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1212 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001213}
1214
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001215static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1216 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001217{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001218 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001219 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001220 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001221
Sathya Perla3abcded2010-10-03 22:12:27 -07001222 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001223 BUG_ON(!rx_page_info->page);
1224
Ajit Khaparde205859a2010-02-09 01:34:21 +00001225 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001226 dma_unmap_page(&adapter->pdev->dev,
1227 dma_unmap_addr(rx_page_info, bus),
1228 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001229 rx_page_info->last_page_user = false;
1230 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001231
1232 atomic_dec(&rxq->used);
1233 return rx_page_info;
1234}
1235
1236/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001237static void be_rx_compl_discard(struct be_rx_obj *rxo,
1238 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001239{
Sathya Perla3abcded2010-10-03 22:12:27 -07001240 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001241 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001242 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001243
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001244 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001245 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001246 put_page(page_info->page);
1247 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001248 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249 }
1250}
1251
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment is copied (wholly or just the
 * header) into the skb's linear area, and remaining fragments are
 * attached as page frags, coalescing frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area and
		 * attach the rest of the first fragment as frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	/* i walks the rx fragments, j the skb frag slots (j lags behind
	 * i because same-page fragments share one slot) */
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference and just grow that slot */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001327}
1328
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates a fresh skb, copies/attaches the received frags into it and
 * hands it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: account the drop and recycle the posted rx
		 * buffers belonging to this completion */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only if RXCSUM is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* rx queue index = offset of rxo within the adapter's rxo array */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1362
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the received page fragments directly to a napi-provided skb
 * (zero-copy) and pushes it through napi_gro_frags().
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No GRO skb: drop completion and recycle rx buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16; -1 wraps so the first "fresh page" bump lands on frag 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for good TCP csums (see callers) */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1418
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001419static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1420 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421{
Sathya Perla2e588f82011-03-11 02:49:26 +00001422 rxcp->pkt_size =
1423 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1424 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1425 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1426 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001427 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001428 rxcp->ip_csum =
1429 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1430 rxcp->l4_csum =
1431 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1432 rxcp->ipv6 =
1433 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1434 rxcp->rxq_idx =
1435 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1436 rxcp->num_rcvd =
1437 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1438 rxcp->pkt_type =
1439 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001440 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001441 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001442 if (rxcp->vlanf) {
1443 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001444 compl);
1445 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1446 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001447 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001448 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001449}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001450
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001451static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1452 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001453{
1454 rxcp->pkt_size =
1455 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1456 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1457 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1458 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001459 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001460 rxcp->ip_csum =
1461 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1462 rxcp->l4_csum =
1463 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1464 rxcp->ipv6 =
1465 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1466 rxcp->rxq_idx =
1467 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1468 rxcp->num_rcvd =
1469 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1470 rxcp->pkt_type =
1471 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001472 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001473 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001474 if (rxcp->vlanf) {
1475 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001476 compl);
1477 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1478 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001479 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001480 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001481}
1482
/* Fetch the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp and consume the CQ entry. Returns NULL when the CQ is empty.
 * The returned pointer is to per-rxo storage and is overwritten by the
 * next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the read of the remaining compl fields after the valid-bit
	 * check above; the HW writes the entry before setting valid */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* v1 vs v0 descriptor layout depends on the chip's native mode */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in CPU order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the user also
		 * configured that vlan explicitly */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1522
Eric Dumazet1829b082011-03-01 05:48:12 +00001523static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001526
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001528 gfp |= __GFP_COMP;
1529 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530}
1531
1532/*
1533 * Allocate a page, split it to fragments of size rx_frag_size and post as
1534 * receive buffers to BE
1535 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001536static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001537{
Sathya Perla3abcded2010-10-03 22:12:27 -07001538 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001539 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001540 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 struct page *pagep = NULL;
1542 struct be_eth_rx_d *rxd;
1543 u64 page_dmaaddr = 0, frag_dmaaddr;
1544 u32 posted, page_offset = 0;
1545
Sathya Perla3abcded2010-10-03 22:12:27 -07001546 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1548 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001549 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001551 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552 break;
1553 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001554 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1555 0, adapter->big_page_size,
1556 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557 page_info->page_offset = 0;
1558 } else {
1559 get_page(pagep);
1560 page_info->page_offset = page_offset + rx_frag_size;
1561 }
1562 page_offset = page_info->page_offset;
1563 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001564 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1566
1567 rxd = queue_head_node(rxq);
1568 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1569 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570
1571 /* Any space left in the current big page for another frag? */
1572 if ((page_offset + rx_frag_size + rx_frag_size) >
1573 adapter->big_page_size) {
1574 pagep = NULL;
1575 page_info->last_page_user = true;
1576 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001577
1578 prev_page_info = page_info;
1579 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001580 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 }
1582 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001583 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584
1585 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001587 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001588 } else if (atomic_read(&rxq->used) == 0) {
1589 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001590 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001591 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592}
1593
/* Fetch the next valid TX completion from tx_cq, byte-swap it in place,
 * invalidate it and advance the CQ tail. Returns NULL when the CQ is empty.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate so this slot isn't seen as a new compl next time */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1609
/* Unmap and free the skb whose wrbs occupy the TXQ from the current tail
 * up to and including last_index. Returns the number of wrbs consumed
 * (header wrb included) so the caller can credit txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was stashed at the slot of its header wrb on transmit */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* First data wrb may map the linear header area; the rest
		 * are page frags */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1641
/* Return the number of events in the event queue. Consumes (zeroes) each
 * EQE as it is counted and advances the EQ tail; stops at the first
 * not-yet-written entry.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order any further reads after the evt != 0 check */
		rmb();
		/* Clear so the slot reads as empty on the next wrap */
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1661
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001662static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001663{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001664 bool rearm = false;
1665 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001666
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001667 /* Deal with any spurious interrupts that come without events */
1668 if (!num)
1669 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001670
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001671 if (num || msix_enabled(eqo->adapter))
1672 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1673
Sathya Perla859b1e42009-08-10 03:43:51 +00001674 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001675 napi_schedule(&eqo->napi);
1676
1677 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001678}
1679
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001680/* Leaves the EQ is disarmed state */
1681static void be_eq_clean(struct be_eq_obj *eqo)
1682{
1683 int num = events_get(eqo);
1684
1685 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1686}
1687
/* Flush an RX object on teardown: discard every pending completion, then
 * release all posted-but-unused rx buffers and reset the queue indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Reconstruct the oldest posted index: head minus outstanding count,
	 * modulo ring size */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info() also decrements rxq->used */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1712
/* Drain all TX queues on teardown. Phase 1: poll each TXQ's CQ for up to
 * ~200ms, processing completions as they arrive. Phase 2: forcibly unmap
 * and free any posted skbs whose completions never came.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the compls and credit the freed wrbs;
				 * counters are per-txq, reset them here */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's wrb span to know where its
			 * last wrb sits */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1771
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001772static void be_evt_queues_destroy(struct be_adapter *adapter)
1773{
1774 struct be_eq_obj *eqo;
1775 int i;
1776
1777 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001778 if (eqo->q.created) {
1779 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001780 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001781 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001782 be_queue_free(adapter, &eqo->q);
1783 }
1784}
1785
1786static int be_evt_queues_create(struct be_adapter *adapter)
1787{
1788 struct be_queue_info *eq;
1789 struct be_eq_obj *eqo;
1790 int i, rc;
1791
1792 adapter->num_evt_qs = num_irqs(adapter);
1793
1794 for_all_evt_queues(adapter, eqo, i) {
1795 eqo->adapter = adapter;
1796 eqo->tx_budget = BE_TX_BUDGET;
1797 eqo->idx = i;
1798 eqo->max_eqd = BE_MAX_EQD;
1799 eqo->enable_aic = true;
1800
1801 eq = &eqo->q;
1802 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1803 sizeof(struct be_eq_entry));
1804 if (rc)
1805 return rc;
1806
1807 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1808 if (rc)
1809 return rc;
1810 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001811 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001812}
1813
Sathya Perla5fb379e2009-06-18 00:02:59 +00001814static void be_mcc_queues_destroy(struct be_adapter *adapter)
1815{
1816 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001817
Sathya Perla8788fdc2009-07-27 22:52:03 +00001818 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001819 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001820 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001821 be_queue_free(adapter, q);
1822
Sathya Perla8788fdc2009-07-27 22:52:03 +00001823 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001824 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001825 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001826 be_queue_free(adapter, q);
1827}
1828
1829/* Must be called only after TX qs are created as MCC shares TX EQ */
1830static int be_mcc_queues_create(struct be_adapter *adapter)
1831{
1832 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001833
Sathya Perla8788fdc2009-07-27 22:52:03 +00001834 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001835 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001836 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001837 goto err;
1838
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001839 /* Use the default EQ for MCC completions */
1840 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001841 goto mcc_cq_free;
1842
Sathya Perla8788fdc2009-07-27 22:52:03 +00001843 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001844 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1845 goto mcc_cq_destroy;
1846
Sathya Perla8788fdc2009-07-27 22:52:03 +00001847 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001848 goto mcc_q_free;
1849
1850 return 0;
1851
1852mcc_q_free:
1853 be_queue_free(adapter, q);
1854mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001855 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001856mcc_cq_free:
1857 be_queue_free(adapter, cq);
1858err:
1859 return -1;
1860}
1861
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862static void be_tx_queues_destroy(struct be_adapter *adapter)
1863{
1864 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001865 struct be_tx_obj *txo;
1866 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867
Sathya Perla3c8def92011-06-12 20:01:58 +00001868 for_all_tx_queues(adapter, txo, i) {
1869 q = &txo->q;
1870 if (q->created)
1871 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1872 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001873
Sathya Perla3c8def92011-06-12 20:01:58 +00001874 q = &txo->cq;
1875 if (q->created)
1876 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1877 be_queue_free(adapter, q);
1878 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001879}
1880
Sathya Perladafc0fe2011-10-24 02:45:02 +00001881static int be_num_txqs_want(struct be_adapter *adapter)
1882{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001883 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1884 be_is_mc(adapter) ||
1885 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001886 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001887 return 1;
1888 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001889 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001890}
1891
/* Create the TX completion queues. Also fixes up the netdev's real
 * number of TX queues (under rtnl) to match what the HW will provide.
 * Returns 0 or the first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* Changing real_num_tx_queues requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1924
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001925static int be_tx_qs_create(struct be_adapter *adapter)
1926{
1927 struct be_tx_obj *txo;
1928 int i, status;
1929
1930 for_all_tx_queues(adapter, txo, i) {
1931 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1932 sizeof(struct be_eth_wrb));
1933 if (status)
1934 return status;
1935
1936 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1937 if (status)
1938 return status;
1939 }
1940
Sathya Perlad3791422012-09-28 04:39:44 +00001941 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1942 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001943 return 0;
1944}
1945
1946static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947{
1948 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001949 struct be_rx_obj *rxo;
1950 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001951
Sathya Perla3abcded2010-10-03 22:12:27 -07001952 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001953 q = &rxo->cq;
1954 if (q->created)
1955 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1956 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001957 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001958}
1959
/* Size the RX queue set (one RSS ring per irq plus the default ring, or
 * a single ring when only one irq is available), publish the count to
 * the stack, and allocate/create a completion queue for every RX ring.
 * Returns 0 or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock guards the stack's view of the real queue count */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs are spread round-robin across the event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
1998
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001999static irqreturn_t be_intx(int irq, void *dev)
2000{
2001 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002002 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002004 /* With INTx only one EQ is used */
2005 num_evts = event_handle(&adapter->eq_obj[0]);
2006 if (num_evts)
2007 return IRQ_HANDLED;
2008 else
2009 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010}
2011
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002012static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002014 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002016 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017 return IRQ_HANDLED;
2018}
2019
Sathya Perla2e588f82011-03-11 02:49:26 +00002020static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021{
Sathya Perla2e588f82011-03-11 02:49:26 +00002022 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002023}
2024
/* NAPI RX processing for one RX ring: consume up to @budget completions,
 * discard flush/partial/mis-filtered completions, hand good frames to the
 * stack (via GRO when possible), notify the CQ of the consumed entries and
 * replenish the RX ring when it runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated for discarded completions too */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring once it drains below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2074
/* Reap up to @budget TX completions of @txo: free the transmitted wrbs,
 * return them to the queue, and wake netdev subqueue @idx if it had been
 * stopped for lack of wrbs and the queue is now at most half full.
 * Returns true when fewer than @budget completions were found, i.e. the
 * CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are protected by a seqcount-style sync */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002107
/* NAPI poll handler shared by all event queues.  Services every TX and RX
 * queue mapped to this EQ (queues are distributed round-robin across the
 * EQs by index), plus MCC completions on the EQ that owns the MCC queue.
 * Completes NAPI and re-arms the EQ only if the budget was not exhausted.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX not drained: force staying in polling mode below */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2144
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002145void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002146{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002147 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2148 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002149 u32 i;
2150
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002151 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002152 return;
2153
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002154 if (lancer_chip(adapter)) {
2155 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2156 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2157 sliport_err1 = ioread32(adapter->db +
2158 SLIPORT_ERROR1_OFFSET);
2159 sliport_err2 = ioread32(adapter->db +
2160 SLIPORT_ERROR2_OFFSET);
2161 }
2162 } else {
2163 pci_read_config_dword(adapter->pdev,
2164 PCICFG_UE_STATUS_LOW, &ue_lo);
2165 pci_read_config_dword(adapter->pdev,
2166 PCICFG_UE_STATUS_HIGH, &ue_hi);
2167 pci_read_config_dword(adapter->pdev,
2168 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2169 pci_read_config_dword(adapter->pdev,
2170 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002171
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002172 ue_lo = (ue_lo & ~ue_lo_mask);
2173 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002174 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002175
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002176 /* On certain platforms BE hardware can indicate spurious UEs.
2177 * Allow the h/w to stop working completely in case of a real UE.
2178 * Hence not setting the hw_error for UE detection.
2179 */
2180 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002181 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002182 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002183 "Error detected in the card\n");
2184 }
2185
2186 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2187 dev_err(&adapter->pdev->dev,
2188 "ERR: sliport status 0x%x\n", sliport_status);
2189 dev_err(&adapter->pdev->dev,
2190 "ERR: sliport error1 0x%x\n", sliport_err1);
2191 dev_err(&adapter->pdev->dev,
2192 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002193 }
2194
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002195 if (ue_lo) {
2196 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2197 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002198 dev_err(&adapter->pdev->dev,
2199 "UE: %s bit set\n", ue_status_low_desc[i]);
2200 }
2201 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002202
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002203 if (ue_hi) {
2204 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2205 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002206 dev_err(&adapter->pdev->dev,
2207 "UE: %s bit set\n", ue_status_hi_desc[i]);
2208 }
2209 }
2210
2211}
2212
Sathya Perla8d56ff12009-11-22 22:02:26 +00002213static void be_msix_disable(struct be_adapter *adapter)
2214{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002215 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002216 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002217 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002218 }
2219}
2220
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002221static uint be_num_rss_want(struct be_adapter *adapter)
2222{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002223 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002224
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002225 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002226 (lancer_chip(adapter) ||
2227 (!sriov_want(adapter) && be_physfn(adapter)))) {
2228 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002229 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2230 }
2231 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232}
2233
/* Try to enable MSI-X with enough vectors for the desired RSS rings (and
 * the RoCE EQs when RoCE is supported).  If the OS grants fewer vectors,
 * retry with the granted count; on total failure leave MSI-X disabled so
 * the caller falls back to INTx.  The granted vectors are split between
 * the NIC and RoCE.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
				(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could be
		 * allocated; retry with that smaller count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between the NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2281
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002282static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002283 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002285 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286}
2287
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, free the IRQs already requested (walking back from i-1 to
 * 0), log a warning, disable MSI-X and return the failing status so the
 * caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free only the vectors that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2311
2312static int be_irq_register(struct be_adapter *adapter)
2313{
2314 struct net_device *netdev = adapter->netdev;
2315 int status;
2316
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002317 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002318 status = be_msix_register(adapter);
2319 if (status == 0)
2320 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002321 /* INTx is not supported for VF */
2322 if (!be_physfn(adapter))
2323 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002324 }
2325
2326 /* INTx */
2327 netdev->irq = adapter->pdev->irq;
2328 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2329 adapter);
2330 if (status) {
2331 dev_err(&adapter->pdev->dev,
2332 "INTx request IRQ failed - err %d\n", status);
2333 return status;
2334 }
2335done:
2336 adapter->isr_registered = true;
2337 return 0;
2338}
2339
2340static void be_irq_unregister(struct be_adapter *adapter)
2341{
2342 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002343 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002344 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002345
2346 if (!adapter->isr_registered)
2347 return;
2348
2349 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002350 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351 free_irq(netdev->irq, adapter);
2352 goto done;
2353 }
2354
2355 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002356 for_all_evt_queues(adapter, eqo, i)
2357 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002358
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359done:
2360 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002361}
2362
/* Destroy the RX rings in the FW and free their host memory.  Each
 * created ring is given time for in-flight DMA to finish and its flush
 * completion to arrive before the CQ is drained.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2383
/* ndo_stop handler: quiesce the adapter.  The ordering is significant:
 * stop RoCE and async MCC first, mask interrupts, disable NAPI and drain
 * each EQ, unregister the IRQs, wait out pending TX completions, and
 * finally destroy the RX rings (which be_open() re-creates).
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* NOTE(review): interrupt masking is skipped on Lancer — presumably
	 * it has no host-side global interrupt-enable; confirm */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* make sure no ISR is still running for this EQ's vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2416
/* Allocate the RX rings, create them in the FW (default ring first, then
 * the RSS rings), program the 128-entry RSS indirection table when more
 * than one ring exists, and post the initial receive buffers.
 * Returns 0 or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * rings' ids; the inner break guards the final partial pass
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2463
/* ndo_open handler: bring the interface up.  Creates the RX rings (their
 * teardown lives in be_close()), registers interrupts, unmasks them,
 * notifies all CQs/EQs, enables NAPI and async MCC, and pushes the current
 * link state to the stack.  On any failure the full be_close() teardown
 * path is used.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* see the matching lancer_chip() guard in be_close() */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2505
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002506static int be_setup_wol(struct be_adapter *adapter, bool enable)
2507{
2508 struct be_dma_mem cmd;
2509 int status = 0;
2510 u8 mac[ETH_ALEN];
2511
2512 memset(mac, 0, ETH_ALEN);
2513
2514 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002515 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2516 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002517 if (cmd.va == NULL)
2518 return -1;
2519 memset(cmd.va, 0, cmd.size);
2520
2521 if (enable) {
2522 status = pci_write_config_dword(adapter->pdev,
2523 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2524 if (status) {
2525 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002526 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002527 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2528 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002529 return status;
2530 }
2531 status = be_cmd_enable_magic_wol(adapter,
2532 adapter->netdev->dev_addr, &cmd);
2533 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2534 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2535 } else {
2536 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2537 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2538 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2539 }
2540
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002541 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002542 return status;
2543}
2544
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last programming attempt; per-VF failures are
 * logged and the loop continues to the remaining VFs.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			/* Lancer programs the VF MAC via the mac-list cmd */
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed MAC with the low octet incremented
		 * (no carry into mac[4] — it wraps within the last octet)
		 */
		mac[5] += 1;
	}
	return status;
}
2579
/* Undo VF setup: remove each VF's MAC and interface, disable SR-IOV and
 * free the per-VF config array.  If any VF is still assigned to a VM,
 * SR-IOV cannot be disabled — only the host-side bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal mirrors the add path in be_vf_eth_addr_config */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2604
/* Teardown counterpart of the setup path: stop the delayed worker, clear
 * the VFs, delete the extra unicast MACs, destroy the interface and all
 * queues, and release the MSI-X vectors.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* i starts at 1: pmac_id[0] (presumably the primary MAC — confirm)
	 * is not deleted here */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2634
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002635static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2636 u32 *cap_flags, u8 domain)
2637{
2638 bool profile_present = false;
2639 int status;
2640
2641 if (lancer_chip(adapter)) {
2642 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2643 if (!status)
2644 profile_present = true;
2645 }
2646
2647 if (!profile_present)
2648 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2649 BE_IF_FLAGS_MULTICAST;
2650}
2651
Sathya Perla39f1d942012-05-08 19:41:24 +00002652static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002653{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002654 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002655 int vf;
2656
Sathya Perla39f1d942012-05-08 19:41:24 +00002657 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2658 GFP_KERNEL);
2659 if (!adapter->vf_cfg)
2660 return -ENOMEM;
2661
Sathya Perla11ac75e2011-12-13 00:58:50 +00002662 for_all_vfs(adapter, vf_cfg, vf) {
2663 vf_cfg->if_handle = -1;
2664 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002665 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002666 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002667}
2668
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002669static int be_vf_setup(struct be_adapter *adapter)
2670{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002671 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002672 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002673 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002674 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002675 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002676
Sathya Perla39f1d942012-05-08 19:41:24 +00002677 enabled_vfs = be_find_vfs(adapter, ENABLED);
2678 if (enabled_vfs) {
2679 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2680 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2681 return 0;
2682 }
2683
2684 if (num_vfs > adapter->dev_num_vfs) {
2685 dev_warn(dev, "Device supports %d VFs and not %d\n",
2686 adapter->dev_num_vfs, num_vfs);
2687 num_vfs = adapter->dev_num_vfs;
2688 }
2689
2690 status = pci_enable_sriov(adapter->pdev, num_vfs);
2691 if (!status) {
2692 adapter->num_vfs = num_vfs;
2693 } else {
2694 /* Platform doesn't support SRIOV though device supports it */
2695 dev_warn(dev, "SRIOV enable failed\n");
2696 return 0;
2697 }
2698
2699 status = be_vf_setup_init(adapter);
2700 if (status)
2701 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002702
Sathya Perla11ac75e2011-12-13 00:58:50 +00002703 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002704 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2705
2706 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2707 BE_IF_FLAGS_BROADCAST |
2708 BE_IF_FLAGS_MULTICAST);
2709
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002710 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2711 &vf_cfg->if_handle, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002712 if (status)
2713 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002714 }
2715
Sathya Perla39f1d942012-05-08 19:41:24 +00002716 if (!enabled_vfs) {
2717 status = be_vf_eth_addr_config(adapter);
2718 if (status)
2719 goto err;
2720 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002721
Sathya Perla11ac75e2011-12-13 00:58:50 +00002722 for_all_vfs(adapter, vf_cfg, vf) {
Vasundhara Volam8a046d32012-08-28 20:37:42 +00002723 lnk_speed = 1000;
2724 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002725 if (status)
2726 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002727 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002728
2729 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2730 vf + 1, vf_cfg->if_handle);
2731 if (status)
2732 goto err;
2733 vf_cfg->def_vid = def_vlan;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002734 }
2735 return 0;
2736err:
2737 return status;
2738}
2739
Sathya Perla30128032011-11-10 19:17:57 +00002740static void be_setup_init(struct be_adapter *adapter)
2741{
2742 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002743 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002744 adapter->if_handle = -1;
2745 adapter->be3_native = false;
2746 adapter->promiscuous = false;
2747 adapter->eq_next_idx = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002748
2749 if (be_physfn(adapter))
2750 adapter->cmd_privileges = MAX_PRIVILEGES;
2751 else
2752 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002753}
2754
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002755static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2756 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002757{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002758 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002759
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002760 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2761 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2762 if (!lancer_chip(adapter) && !be_physfn(adapter))
2763 *active_mac = true;
2764 else
2765 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002766
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002767 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002768 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002769
2770 if (lancer_chip(adapter)) {
2771 status = be_cmd_get_mac_from_list(adapter, mac,
2772 active_mac, pmac_id, 0);
2773 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00002774 status = be_cmd_mac_addr_query(adapter, mac, false,
2775 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002776 }
2777 } else if (be_physfn(adapter)) {
2778 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00002779 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002780 *active_mac = false;
2781 } else {
2782 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00002783 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002784 if_handle, 0);
2785 *active_mac = true;
2786 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002787 return status;
2788}
2789
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002790static void be_get_resources(struct be_adapter *adapter)
2791{
2792 int status;
2793 bool profile_present = false;
2794
2795 if (lancer_chip(adapter)) {
2796 status = be_cmd_get_func_config(adapter);
2797
2798 if (!status)
2799 profile_present = true;
2800 }
2801
2802 if (profile_present) {
2803 /* Sanity fixes for Lancer */
2804 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2805 BE_UC_PMAC_COUNT);
2806 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2807 BE_NUM_VLANS_SUPPORTED);
2808 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2809 BE_MAX_MC);
2810 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2811 MAX_TX_QS);
2812 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2813 BE3_MAX_RSS_QS);
2814 adapter->max_event_queues = min_t(u16,
2815 adapter->max_event_queues,
2816 BE3_MAX_RSS_QS);
2817
2818 if (adapter->max_rss_queues &&
2819 adapter->max_rss_queues == adapter->max_rx_queues)
2820 adapter->max_rss_queues -= 1;
2821
2822 if (adapter->max_event_queues < adapter->max_rss_queues)
2823 adapter->max_rss_queues = adapter->max_event_queues;
2824
2825 } else {
2826 if (be_physfn(adapter))
2827 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2828 else
2829 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2830
2831 if (adapter->function_mode & FLEX10_MODE)
2832 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2833 else
2834 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2835
2836 adapter->max_mcast_mac = BE_MAX_MC;
2837 adapter->max_tx_queues = MAX_TX_QS;
2838 adapter->max_rss_queues = (adapter->be3_native) ?
2839 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2840 adapter->max_event_queues = BE3_MAX_RSS_QS;
2841
2842 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2843 BE_IF_FLAGS_BROADCAST |
2844 BE_IF_FLAGS_MULTICAST |
2845 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2846 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2847 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2848 BE_IF_FLAGS_PROMISCUOUS;
2849
2850 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2851 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2852 }
2853}
2854
Sathya Perla39f1d942012-05-08 19:41:24 +00002855/* Routine to query per function resource limits */
2856static int be_get_config(struct be_adapter *adapter)
2857{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002858 int pos, status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002859 u16 dev_num_vfs;
2860
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002861 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2862 &adapter->function_mode,
2863 &adapter->function_caps);
2864 if (status)
2865 goto err;
2866
2867 be_get_resources(adapter);
2868
2869 /* primary mac needs 1 pmac entry */
2870 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2871 sizeof(u32), GFP_KERNEL);
2872 if (!adapter->pmac_id) {
2873 status = -ENOMEM;
2874 goto err;
2875 }
2876
Sathya Perla39f1d942012-05-08 19:41:24 +00002877 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2878 if (pos) {
2879 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2880 &dev_num_vfs);
Vasundhara Volam7c5a5242012-08-28 20:37:41 +00002881 if (!lancer_chip(adapter))
2882 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
Sathya Perla39f1d942012-05-08 19:41:24 +00002883 adapter->dev_num_vfs = dev_num_vfs;
2884 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002885err:
2886 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002887}
2888
Sathya Perla5fb379e2009-06-18 00:02:59 +00002889static int be_setup(struct be_adapter *adapter)
2890{
Sathya Perla39f1d942012-05-08 19:41:24 +00002891 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002892 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002893 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002894 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002895 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002896 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002897
Sathya Perla30128032011-11-10 19:17:57 +00002898 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002899
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002900 if (!lancer_chip(adapter))
2901 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00002902
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002903 status = be_get_config(adapter);
2904 if (status)
2905 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002906
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002907 be_msix_enable(adapter);
2908
2909 status = be_evt_queues_create(adapter);
2910 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002911 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002912
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002913 status = be_tx_cqs_create(adapter);
2914 if (status)
2915 goto err;
2916
2917 status = be_rx_cqs_create(adapter);
2918 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002919 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002920
Sathya Perla5fb379e2009-06-18 00:02:59 +00002921 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002922 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002923 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002924
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002925 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2926 /* In UMC mode FW does not return right privileges.
2927 * Override with correct privilege equivalent to PF.
2928 */
2929 if (be_is_mc(adapter))
2930 adapter->cmd_privileges = MAX_PRIVILEGES;
2931
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002932 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2933 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002934
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002935 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002936 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002937
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002938 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00002939
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002940 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002941 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002942 if (status != 0)
2943 goto err;
2944
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002945 memset(mac, 0, ETH_ALEN);
2946 active_mac = false;
2947 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2948 &active_mac, &adapter->pmac_id[0]);
2949 if (status != 0)
2950 goto err;
2951
2952 if (!active_mac) {
2953 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2954 &adapter->pmac_id[0], 0);
2955 if (status != 0)
2956 goto err;
2957 }
2958
2959 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2960 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2961 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002962 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002963
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002964 status = be_tx_qs_create(adapter);
2965 if (status)
2966 goto err;
2967
Sathya Perla04b71172011-09-27 13:30:27 -04002968 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002969
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002970 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002971 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002972
2973 be_set_rx_mode(adapter->netdev);
2974
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002975 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002976
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002977 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2978 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002979 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002980
Sathya Perla39f1d942012-05-08 19:41:24 +00002981 if (be_physfn(adapter) && num_vfs) {
2982 if (adapter->dev_num_vfs)
2983 be_vf_setup(adapter);
2984 else
2985 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002986 }
2987
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002988 status = be_cmd_get_phy_info(adapter);
2989 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002990 adapter->phy.fc_autoneg = 1;
2991
Sathya Perla191eb752012-02-23 18:50:13 +00002992 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2993 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002994 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002995err:
2996 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002997 return status;
2998}
2999
Ivan Vecera66268732011-12-08 01:31:21 +00003000#ifdef CONFIG_NET_POLL_CONTROLLER
3001static void be_netpoll(struct net_device *netdev)
3002{
3003 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003004 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003005 int i;
3006
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003007 for_all_evt_queues(adapter, eqo, i)
3008 event_handle(eqo);
3009
3010 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003011}
3012#endif
3013
Ajit Khaparde84517482009-09-04 03:12:16 +00003014#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003015char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3016
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003017static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003018 const u8 *p, u32 img_start, int image_size,
3019 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003020{
3021 u32 crc_offset;
3022 u8 flashed_crc[4];
3023 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003024
3025 crc_offset = hdr_size + img_start + image_size - 4;
3026
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003027 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003028
3029 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003030 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003031 if (status) {
3032 dev_err(&adapter->pdev->dev,
3033 "could not get crc from flash, not flashing redboot\n");
3034 return false;
3035 }
3036
3037 /*update redboot only if crc does not match*/
3038 if (!memcmp(flashed_crc, p, 4))
3039 return false;
3040 else
3041 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003042}
3043
Sathya Perla306f1342011-08-02 19:57:45 +00003044static bool phy_flashing_required(struct be_adapter *adapter)
3045{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003046 return (adapter->phy.phy_type == TN_8022 &&
3047 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003048}
3049
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003050static bool is_comp_in_ufi(struct be_adapter *adapter,
3051 struct flash_section_info *fsec, int type)
3052{
3053 int i = 0, img_type = 0;
3054 struct flash_section_info_g2 *fsec_g2 = NULL;
3055
3056 if (adapter->generation != BE_GEN3)
3057 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3058
3059 for (i = 0; i < MAX_FLASH_COMP; i++) {
3060 if (fsec_g2)
3061 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3062 else
3063 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3064
3065 if (img_type == type)
3066 return true;
3067 }
3068 return false;
3069
3070}
3071
3072struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3073 int header_size,
3074 const struct firmware *fw)
3075{
3076 struct flash_section_info *fsec = NULL;
3077 const u8 *p = fw->data;
3078
3079 p += header_size;
3080 while (p < (fw->data + fw->size)) {
3081 fsec = (struct flash_section_info *)p;
3082 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3083 return fsec;
3084 p += 32;
3085 }
3086 return NULL;
3087}
3088
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003089static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003090 const struct firmware *fw,
3091 struct be_dma_mem *flash_cmd,
3092 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003093
Ajit Khaparde84517482009-09-04 03:12:16 +00003094{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003095 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003096 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003097 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00003098 int num_bytes;
3099 const u8 *p = fw->data;
3100 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08003101 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003102 int num_comp, hdr_size;
3103 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003104
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003105 struct flash_comp gen3_flash_types[] = {
3106 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3107 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3108 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3109 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3110 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3111 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3112 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3113 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3114 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3115 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3116 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3117 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3118 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3119 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3120 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3121 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3122 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3123 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3124 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3125 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003126 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003127
3128 struct flash_comp gen2_flash_types[] = {
3129 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3130 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3131 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3132 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3133 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3134 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3135 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3136 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3137 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3138 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3139 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3140 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3141 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3142 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3143 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3144 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003145 };
3146
3147 if (adapter->generation == BE_GEN3) {
3148 pflashcomp = gen3_flash_types;
3149 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003150 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003151 } else {
3152 pflashcomp = gen2_flash_types;
3153 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003154 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003155 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003156 /* Get flash section info*/
3157 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3158 if (!fsec) {
3159 dev_err(&adapter->pdev->dev,
3160 "Invalid Cookie. UFI corrupted ?\n");
3161 return -1;
3162 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003163 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003164 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003165 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003166
3167 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3168 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3169 continue;
3170
3171 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003172 if (!phy_flashing_required(adapter))
3173 continue;
3174 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003175
3176 hdr_size = filehdr_size +
3177 (num_of_images * sizeof(struct image_hdr));
3178
3179 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3180 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3181 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003182 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003183
3184 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003185 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003186 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003187 if (p + pflashcomp[i].size > fw->data + fw->size)
3188 return -1;
3189 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003190 while (total_bytes) {
3191 if (total_bytes > 32*1024)
3192 num_bytes = 32*1024;
3193 else
3194 num_bytes = total_bytes;
3195 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003196 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003197 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003198 flash_op = FLASHROM_OPER_PHY_FLASH;
3199 else
3200 flash_op = FLASHROM_OPER_FLASH;
3201 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003202 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003203 flash_op = FLASHROM_OPER_PHY_SAVE;
3204 else
3205 flash_op = FLASHROM_OPER_SAVE;
3206 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003207 memcpy(req->params.data_buf, p, num_bytes);
3208 p += num_bytes;
3209 status = be_cmd_write_flashrom(adapter, flash_cmd,
3210 pflashcomp[i].optype, flash_op, num_bytes);
3211 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003212 if ((status == ILLEGAL_IOCTL_REQ) &&
3213 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003214 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003215 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003216 dev_err(&adapter->pdev->dev,
3217 "cmd to write to flash rom failed.\n");
3218 return -1;
3219 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003220 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003221 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003222 return 0;
3223}
3224
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003225static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3226{
3227 if (fhdr == NULL)
3228 return 0;
3229 if (fhdr->build[0] == '3')
3230 return BE_GEN3;
3231 else if (fhdr->build[0] == '2')
3232 return BE_GEN2;
3233 else
3234 return 0;
3235}
3236
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003237static int lancer_wait_idle(struct be_adapter *adapter)
3238{
3239#define SLIPORT_IDLE_TIMEOUT 30
3240 u32 reg_val;
3241 int status = 0, i;
3242
3243 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3244 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3245 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3246 break;
3247
3248 ssleep(1);
3249 }
3250
3251 if (i == SLIPORT_IDLE_TIMEOUT)
3252 status = -1;
3253
3254 return status;
3255}
3256
3257static int lancer_fw_reset(struct be_adapter *adapter)
3258{
3259 int status = 0;
3260
3261 status = lancer_wait_idle(adapter);
3262 if (status)
3263 return status;
3264
3265 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3266 PHYSDEV_CONTROL_OFFSET);
3267
3268 return status;
3269}
3270
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003271static int lancer_fw_download(struct be_adapter *adapter,
3272 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003273{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003274#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3275#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3276 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003277 const u8 *data_ptr = NULL;
3278 u8 *dest_image_ptr = NULL;
3279 size_t image_size = 0;
3280 u32 chunk_size = 0;
3281 u32 data_written = 0;
3282 u32 offset = 0;
3283 int status = 0;
3284 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003285 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003286
3287 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3288 dev_err(&adapter->pdev->dev,
3289 "FW Image not properly aligned. "
3290 "Length must be 4 byte aligned.\n");
3291 status = -EINVAL;
3292 goto lancer_fw_exit;
3293 }
3294
3295 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3296 + LANCER_FW_DOWNLOAD_CHUNK;
3297 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3298 &flash_cmd.dma, GFP_KERNEL);
3299 if (!flash_cmd.va) {
3300 status = -ENOMEM;
3301 dev_err(&adapter->pdev->dev,
3302 "Memory allocation failure while flashing\n");
3303 goto lancer_fw_exit;
3304 }
3305
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003306 dest_image_ptr = flash_cmd.va +
3307 sizeof(struct lancer_cmd_req_write_object);
3308 image_size = fw->size;
3309 data_ptr = fw->data;
3310
3311 while (image_size) {
3312 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3313
3314 /* Copy the image chunk content. */
3315 memcpy(dest_image_ptr, data_ptr, chunk_size);
3316
3317 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003318 chunk_size, offset,
3319 LANCER_FW_DOWNLOAD_LOCATION,
3320 &data_written, &change_status,
3321 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003322 if (status)
3323 break;
3324
3325 offset += data_written;
3326 data_ptr += data_written;
3327 image_size -= data_written;
3328 }
3329
3330 if (!status) {
3331 /* Commit the FW written */
3332 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003333 0, offset,
3334 LANCER_FW_DOWNLOAD_LOCATION,
3335 &data_written, &change_status,
3336 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003337 }
3338
3339 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3340 flash_cmd.dma);
3341 if (status) {
3342 dev_err(&adapter->pdev->dev,
3343 "Firmware load error. "
3344 "Status code: 0x%x Additional Status: 0x%x\n",
3345 status, add_status);
3346 goto lancer_fw_exit;
3347 }
3348
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003349 if (change_status == LANCER_FW_RESET_NEEDED) {
3350 status = lancer_fw_reset(adapter);
3351 if (status) {
3352 dev_err(&adapter->pdev->dev,
3353 "Adapter busy for FW reset.\n"
3354 "New FW will not be active.\n");
3355 goto lancer_fw_exit;
3356 }
3357 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3358 dev_err(&adapter->pdev->dev,
3359 "System reboot required for new FW"
3360 " to be active\n");
3361 }
3362
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003363 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3364lancer_fw_exit:
3365 return status;
3366}
3367
3368static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3369{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003370 struct flash_file_hdr_g2 *fhdr;
3371 struct flash_file_hdr_g3 *fhdr3;
3372 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003373 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003374 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003375 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003376
3377 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003378 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003379
Ajit Khaparde84517482009-09-04 03:12:16 +00003380 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003381 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3382 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003383 if (!flash_cmd.va) {
3384 status = -ENOMEM;
3385 dev_err(&adapter->pdev->dev,
3386 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003387 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003388 }
3389
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003390 if ((adapter->generation == BE_GEN3) &&
3391 (get_ufigen_type(fhdr) == BE_GEN3)) {
3392 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003393 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3394 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003395 img_hdr_ptr = (struct image_hdr *) (fw->data +
3396 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003397 i * sizeof(struct image_hdr)));
3398 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3399 status = be_flash_data(adapter, fw, &flash_cmd,
3400 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003401 }
3402 } else if ((adapter->generation == BE_GEN2) &&
3403 (get_ufigen_type(fhdr) == BE_GEN2)) {
3404 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3405 } else {
3406 dev_err(&adapter->pdev->dev,
3407 "UFI and Interface are not compatible for flashing\n");
3408 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00003409 }
3410
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003411 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3412 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003413 if (status) {
3414 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003415 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003416 }
3417
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003418 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003419
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003420be_fw_exit:
3421 return status;
3422}
3423
3424int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3425{
3426 const struct firmware *fw;
3427 int status;
3428
3429 if (!netif_running(adapter->netdev)) {
3430 dev_err(&adapter->pdev->dev,
3431 "Firmware load not allowed (interface is down)\n");
3432 return -1;
3433 }
3434
3435 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3436 if (status)
3437 goto fw_exit;
3438
3439 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3440
3441 if (lancer_chip(adapter))
3442 status = lancer_fw_download(adapter, fw);
3443 else
3444 status = be_fw_download(adapter, fw);
3445
Ajit Khaparde84517482009-09-04 03:12:16 +00003446fw_exit:
3447 release_firmware(fw);
3448 return status;
3449}
3450
/* net_device callbacks shared by BEx and Lancer chips.
 * The ndo_set_vf_* / ndo_get_vf_config hooks manage SR-IOV VFs and are
 * meaningful only when invoked on the PF.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3470
/* Initialize netdev feature flags, callbacks and per-EQ NAPI contexts
 * prior to register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* features the hw supports and userspace may toggle */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* enable everything by default; VLAN rx-accel/filter are fixed on */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3502
/* Undo be_map_pci_bars()/lancer_roce_map_pci_bars(): release whichever
 * BAR mappings were established. Safe with a partially-mapped adapter
 * since each field is checked before unmapping.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3512
3513static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3514{
3515 struct pci_dev *pdev = adapter->pdev;
3516 u8 __iomem *addr;
3517
3518 addr = pci_iomap(pdev, 2, 0);
3519 if (addr == NULL)
3520 return -ENOMEM;
3521
3522 adapter->roce_db.base = addr;
3523 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3524 adapter->roce_db.size = 8192;
3525 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3526 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003527}
3528
3529static int be_map_pci_bars(struct be_adapter *adapter)
3530{
3531 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003532 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003533
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003534 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003535 if (be_type_2_3(adapter)) {
3536 addr = ioremap_nocache(
3537 pci_resource_start(adapter->pdev, 0),
3538 pci_resource_len(adapter->pdev, 0));
3539 if (addr == NULL)
3540 return -ENOMEM;
3541 adapter->db = addr;
3542 }
3543 if (adapter->if_type == SLI_INTF_TYPE_3) {
3544 if (lancer_roce_map_pci_bars(adapter))
3545 goto pci_map_err;
3546 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003547 return 0;
3548 }
3549
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003550 if (be_physfn(adapter)) {
3551 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3552 pci_resource_len(adapter->pdev, 2));
3553 if (addr == NULL)
3554 return -ENOMEM;
3555 adapter->csr = addr;
3556 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003557
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003558 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003559 db_reg = 4;
3560 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003561 if (be_physfn(adapter))
3562 db_reg = 4;
3563 else
3564 db_reg = 0;
3565 }
3566 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3567 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003568 if (addr == NULL)
3569 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003570 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003571 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3572 adapter->roce_db.size = 4096;
3573 adapter->roce_db.io_addr =
3574 pci_resource_start(adapter->pdev, db_reg);
3575 adapter->roce_db.total_size =
3576 pci_resource_len(adapter->pdev, db_reg);
3577 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003578 return 0;
3579pci_map_err:
3580 be_unmap_pci_bars(adapter);
3581 return -ENOMEM;
3582}
3583
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003584static void be_ctrl_cleanup(struct be_adapter *adapter)
3585{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003586 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003587
3588 be_unmap_pci_bars(adapter);
3589
3590 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003591 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3592 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003593
Sathya Perla5b8821b2011-08-02 19:57:44 +00003594 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003595 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003596 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3597 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003598}
3599
/* Set up everything needed to issue fw commands: map the PCI BARs,
 * allocate the mailbox and rx-filter DMA buffers, and initialize the
 * mbox/mcc locks. On failure, resources acquired so far are released
 * via the goto-unwind chain. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* persistent DMA buffer reused by every RX_FILTER cmd */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved state is used by the EEH/error-recovery paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3651
/* Free the DMA buffer used for fw stats commands, if it was allocated. */
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}
3660
3661static int be_stats_init(struct be_adapter *adapter)
3662{
Sathya Perla3abcded2010-10-03 22:12:27 -07003663 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003664
Selvin Xavier005d5692011-05-16 07:36:35 +00003665 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003666 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003667 } else {
3668 if (lancer_chip(adapter))
3669 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3670 else
3671 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3672 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003673 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3674 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003675 if (cmd->va == NULL)
3676 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003677 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003678 return 0;
3679}
3680
/* PCI remove callback: tear down in (roughly) reverse order of
 * be_probe(). The recovery worker must be stopped before the netdev is
 * unregistered, and fw is told we are done only after be_clear() has
 * finished issuing cmds.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3711
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003712bool be_is_wol_supported(struct be_adapter *adapter)
3713{
3714 return ((adapter->wol_cap & BE_WOL_CAP) &&
3715 !be_is_wol_excluded(adapter)) ? true : false;
3716}
3717
Somnath Kotur941a77d2012-05-17 22:59:03 +00003718u32 be_get_fw_log_level(struct be_adapter *adapter)
3719{
3720 struct be_dma_mem extfat_cmd;
3721 struct be_fat_conf_params *cfgs;
3722 int status;
3723 u32 level = 0;
3724 int j;
3725
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003726 if (lancer_chip(adapter))
3727 return 0;
3728
Somnath Kotur941a77d2012-05-17 22:59:03 +00003729 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3730 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3731 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3732 &extfat_cmd.dma);
3733
3734 if (!extfat_cmd.va) {
3735 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3736 __func__);
3737 goto err;
3738 }
3739
3740 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3741 if (!status) {
3742 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3743 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00003744 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00003745 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3746 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3747 }
3748 }
3749 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3750 extfat_cmd.dma);
3751err:
3752 return level;
3753}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003754
/* One-time fw queries done at probe: controller attributes, WOL
 * capability (with an exclusion-list fallback) and the fw log level
 * used to seed msg_enable. Returns 0 or a negative errno.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable NETIF_MSG_HW only when fw logging is at/below default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3783
Sathya Perla39f1d942012-05-08 19:41:24 +00003784static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003785{
3786 struct pci_dev *pdev = adapter->pdev;
3787 u32 sli_intf = 0, if_type;
3788
3789 switch (pdev->device) {
3790 case BE_DEVICE_ID1:
3791 case OC_DEVICE_ID1:
3792 adapter->generation = BE_GEN2;
3793 break;
3794 case BE_DEVICE_ID2:
3795 case OC_DEVICE_ID2:
3796 adapter->generation = BE_GEN3;
3797 break;
3798 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003799 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003800 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003801 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3802 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003803 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3804 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003805 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003806 !be_type_2_3(adapter)) {
3807 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3808 return -EINVAL;
3809 }
3810 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3811 SLI_INTF_FAMILY_SHIFT);
3812 adapter->generation = BE_GEN3;
3813 break;
3814 case OC_DEVICE_ID5:
3815 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3816 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003817 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3818 return -EINVAL;
3819 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003820 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3821 SLI_INTF_FAMILY_SHIFT);
3822 adapter->generation = BE_GEN3;
3823 break;
3824 default:
3825 adapter->generation = 0;
3826 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003827
3828 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3829 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003830 return 0;
3831}
3832
/* Attempt to recover a Lancer adapter after a SLIPORT error: wait for
 * fw to report ready, tear down and rebuild the sw state, and re-open
 * the interface if it was running. Returns 0 on success.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear the error flags so fw cmds may be issued during be_setup() */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* failure is logged only in the EEH case; otherwise the caller
	 * (the recovery worker) will simply retry on its next tick
	 */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3869
/* Periodic (1s) worker that checks for hw errors and, on Lancer,
 * attempts SLIPORT recovery. The netdev is detached (under rtnl) for
 * the duration of recovery and re-attached only on success. The work
 * unconditionally re-arms itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* skip recovery while an EEH error is outstanding */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3897
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, kicks off stats and (periodically) temperature
 * queries, replenishes RX queues that starved of buffers, and updates
 * EQ delay settings. Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new stats cmd only when the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* query die temperature once every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3940
Sathya Perla39f1d942012-05-08 19:41:24 +00003941static bool be_reset_required(struct be_adapter *adapter)
3942{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003943 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003944}
3945
Sathya Perlad3791422012-09-28 04:39:44 +00003946static char *mc_name(struct be_adapter *adapter)
3947{
3948 if (adapter->function_mode & FLEX10_MODE)
3949 return "FLEX10";
3950 else if (adapter->function_mode & VNIC_MODE)
3951 return "vNIC";
3952 else if (adapter->function_mode & UMC_ENABLED)
3953 return "UMC";
3954 else
3955 return "";
3956}
3957
/* Printable name for this PCI function's role: PF or VF. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
3962
/* PCI probe callback: bring up one adapter end-to-end.
 * Sequence: enable PCI -> alloc netdev -> identify chip -> set DMA
 * mask -> map BARs / cmd infrastructure -> sync with fw -> (optional)
 * function reset -> stats buffer -> initial fw config -> be_setup()
 * -> register netdev -> start workers. Each failure unwinds exactly
 * the steps completed so far via the goto chain at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* identify chip generation / SLI family before touching hw */
	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skip the reset when VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4089
/* PM suspend callback: optionally arm WOL, stop the recovery worker,
 * close the interface (under rtnl), release hw resources and power
 * the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4113
/* PM resume callback: re-enable and re-power the device, re-sync with
 * fw, rebuild the sw state, re-open the interface if it was running,
 * restart the recovery worker and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	/* back to full power (D0) and restore config space */
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4150
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop all deferred work before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Arm wake-on-LAN in FW if configured */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function-level reset stops any in-flight DMA (see note above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4173
/* EEH/AER callback: a PCI channel error was detected.  Quiesce the
 * device and tell the PCI error-recovery core how to proceed.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag the error so other driver paths stop touching the HW */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Channel is permanently dead: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4213
/* EEH/AER callback: the PCI slot has been reset.  Re-enable the device
 * and verify the adapter/FW came back before recovery continues.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Presumably clears eeh_error set in be_eeh_err_detected() and
	 * related error state — confirm against be_clear_all_error()
	 */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);	/* back to D0 */
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4238
/* EEH/AER callback: recovery succeeded; re-initialize the adapter and
 * bring the interface back up.  On any failure the device is simply
 * left detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* Reset the function to start from a clean state after the error */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the FW-health poller cancelled in be_eeh_err_detected() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4275
/* PCI error-recovery (EEH/AER) entry points for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4281
/* PCI driver descriptor: probe/remove, legacy suspend/resume PM hooks,
 * shutdown, and the EEH/AER error handlers above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4292
4293static int __init be_init_module(void)
4294{
Joe Perches8e95a202009-12-03 07:58:21 +00004295 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4296 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004297 printk(KERN_WARNING DRV_NAME
4298 " : Module param rx_frag_size must be 2048/4096/8192."
4299 " Using 2048\n");
4300 rx_frag_size = 2048;
4301 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004302
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004303 return pci_register_driver(&be_driver);
4304}
4305module_init(be_init_module);
4306
4307static void __exit be_exit_module(void)
4308{
4309 pci_unregister_driver(&be_driver);
4310}
4311module_exit(be_exit_module);