blob: 76f3a985e1d54ab43e88b3c049ef3b26cc8a61fa [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI device ids this driver binds to; the zeroed entry terminates the
 * table.  MODULE_DEVICE_TABLE exports it so udev/modprobe can autoload
 * the module when a matching device appears.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: entry i names the block flagged by bit i of the
 * unrecoverable-error status low register (used for error reporting).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: entry i names the block flagged by bit i of the
 * unrecoverable-error status high register; trailing bits are unnamed.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
147 return -1;
148 memset(mem->va, 0, mem->size);
149 return 0;
150}
151
/* Enable or disable host interrupt delivery by toggling the HOSTINTR bit
 * of the membar control register in PCI config space.  The config write
 * is skipped when the bit already has the requested state or when the
 * device is in an EEH error state.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;	/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_RQ_RING_ID_MASK;
178 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185{
186 u32 val = 0;
187 val |= qid & DB_TXULP_RING_ID_MASK;
188 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000189
190 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192}
193
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * consumed event entries and optionally re-arm the EQ and/or clear the
 * interrupt.  Skipped entirely while the device is in an EEH error state.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
213
/* Ring the completion-queue doorbell for CQ @qid: acknowledge
 * @num_popped consumed completion entries and optionally re-arm the CQ.
 * Skipped entirely while the device is in an EEH error state.
 * Non-static: also used by other files of this driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
229
/* ndo_set_mac_address handler.  Queries the MAC currently programmed in
 * the card; if it differs from the requested one, the new MAC is added
 * *before* the old pmac entry is deleted so a valid filter exists at all
 * times.  Returns 0 on success or a negative errno from the FW commands.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;	/* old id; pmac_add overwrites it */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
261
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000262static void populate_be2_stats(struct be_adapter *adapter)
263{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000264 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
265 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
266 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000267 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000268 &rxf_stats->port[adapter->port_num];
269 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000270
Sathya Perlaac124ff2011-07-25 19:10:14 +0000271 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272 drvs->rx_pause_frames = port_stats->rx_pause_frames;
273 drvs->rx_crc_errors = port_stats->rx_crc_errors;
274 drvs->rx_control_frames = port_stats->rx_control_frames;
275 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
276 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
277 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
278 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
279 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
280 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
281 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
282 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
283 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
284 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
285 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000286 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000287 drvs->rx_dropped_header_too_small =
288 port_stats->rx_dropped_header_too_small;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000289 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000290 drvs->rx_alignment_symbol_errors =
291 port_stats->rx_alignment_symbol_errors;
292
293 drvs->tx_pauseframes = port_stats->tx_pauseframes;
294 drvs->tx_controlframes = port_stats->tx_controlframes;
295
296 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000297 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000298 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000299 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000300 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
301 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
302 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
303 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
304 drvs->forwarded_packets = rxf_stats->forwarded_packets;
305 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000306 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
307 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000308 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
309}
310
311static void populate_be3_stats(struct be_adapter *adapter)
312{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000313 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
314 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
315 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000316 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000317 &rxf_stats->port[adapter->port_num];
318 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000319
Sathya Perlaac124ff2011-07-25 19:10:14 +0000320 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000321 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
322 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000323 drvs->rx_pause_frames = port_stats->rx_pause_frames;
324 drvs->rx_crc_errors = port_stats->rx_crc_errors;
325 drvs->rx_control_frames = port_stats->rx_control_frames;
326 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
327 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
328 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
329 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
330 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
331 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
332 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
333 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
334 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
335 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
336 drvs->rx_dropped_header_too_small =
337 port_stats->rx_dropped_header_too_small;
338 drvs->rx_input_fifo_overflow_drop =
339 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000340 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341 drvs->rx_alignment_symbol_errors =
342 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000343 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000344 drvs->tx_pauseframes = port_stats->tx_pauseframes;
345 drvs->tx_controlframes = port_stats->tx_controlframes;
346 drvs->jabber_events = port_stats->jabber_events;
347 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
348 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
349 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
350 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
/* Copy the Lancer per-port (pport) firmware statistics into the driver's
 * generation-independent drv_stats block.  The FW stats buffer is first
 * byte-swapped from little-endian in place.  Lancer reports only one
 * fifo-overflow counter, so both the input and rxpp overflow drvs fields
 * are fed from pport_stats->rx_fifo_overflow.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395
Sathya Perla09c1c682011-08-22 19:41:53 +0000396static void accumulate_16bit_val(u32 *acc, u16 val)
397{
398#define lo(x) (x & 0xFFFF)
399#define hi(x) (x & 0xFFFF0000)
400 bool wrapped = val < lo(*acc);
401 u32 newacc = hi(*acc) + val;
402
403 if (wrapped)
404 newacc += 65536;
405 ACCESS_ONCE(*acc) = newacc;
406}
407
/* Translate the raw FW statistics (layout depends on chip generation:
 * BE2 v0, BE3 v1 or Lancer pport) into adapter->drv_stats, then fold the
 * wrapping per-RXQ erx drop counters into 32-bit software accumulators.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
432
/* ndo_get_stats64 handler: aggregate per-queue SW counters (read under
 * the u64_stats fetch/retry loop so 64-bit values are consistent on
 * 32-bit hosts) and derive the netdev error counters from drv_stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until the writer did not run concurrently */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
498
Sathya Perlaea172a02011-08-02 19:57:42 +0000499void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501 struct net_device *netdev = adapter->netdev;
502
Sathya Perlaea172a02011-08-02 19:57:42 +0000503 /* when link status changes, link speed must be re-queried from card */
504 adapter->link_speed = -1;
505 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
506 netif_carrier_on(netdev);
507 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
508 } else {
509 netif_carrier_off(netdev);
510 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512}
513
/* Account one transmit request in the per-TXQ software stats under the
 * u64_stats sequence counter (so 64-bit counters read consistently on
 * 32-bit hosts).  A GSO skb counts as gso_segs packets, others as one.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;	/* queue was stopped by this xmit */
	u64_stats_update_end(&stats->sync);
}
528
529/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000530static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
531 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700532{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700533 int cnt = (skb->len > skb->data_len);
534
535 cnt += skb_shinfo(skb)->nr_frags;
536
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700537 /* to account for hdr wrb */
538 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000539 if (lancer_chip(adapter) || !(cnt & 1)) {
540 *dummy = false;
541 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700542 /* add a dummy to make it an even num */
543 cnt++;
544 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700546 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
547 return cnt;
548}
549
550static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
551{
552 wrb->frag_pa_hi = upper_32_bits(addr);
553 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
554 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
555}
556
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000557static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
558 struct sk_buff *skb)
559{
560 u8 vlan_prio;
561 u16 vlan_tag;
562
563 vlan_tag = vlan_tx_tag_get(skb);
564 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
565 /* If vlan priority provided by OS is NOT in available bmap */
566 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
567 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
568 adapter->recommended_prio;
569
570 return vlan_tag;
571}
572
/* Build the TX header WRB for @skb: request CRC insertion, LSO/checksum
 * offload flags as appropriate, VLAN insertion when a tag is present,
 * completion/event generation, the total WRB count and payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally wants explicit csum flags for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
616
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000617static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000618 bool unmap_single)
619{
620 dma_addr_t dma;
621
622 be_dws_le_to_cpu(wrb, sizeof(*wrb));
623
624 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000625 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000626 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000627 dma_unmap_single(dev, dma, wrb->frag_len,
628 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000629 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000630 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000631 }
632}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700633
/* DMA-map the skb's linear part and paged frags and fill one TX WRB per
 * mapped segment, plus an optional trailing dummy WRB. The header WRB is
 * reserved first and filled last, once the total payload length is known.
 *
 * Returns the number of payload bytes placed in WRBs, or 0 on a DMA
 * mapping error, in which case everything mapped so far is unmapped and
 * the queue head is rewound to where it started.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; unwind point on error */

	/* Linear (header) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything mapped so far. Only the first WRB (if any) came
	 * from dma_map_single(); the rest are page mappings, hence
	 * map_single is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
699
/* netdev ->ndo_start_xmit handler: build WRBs for the skb on the TX queue
 * selected by the skb's queue mapping and ring the TX doorbell.
 * Always returns NETDEV_TX_OK; on WRB-build failure the skb is dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		(skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* Insert the vlan tag into the pkt data in software instead */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* WRB build failed: rewind the queue head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
759
760static int be_change_mtu(struct net_device *netdev, int new_mtu)
761{
762 struct be_adapter *adapter = netdev_priv(netdev);
763 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000764 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
765 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700766 dev_info(&adapter->pdev->dev,
767 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000768 BE_MIN_MTU,
769 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770 return -EINVAL;
771 }
772 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
773 netdev->mtu, new_mtu);
774 netdev->mtu = new_mtu;
775 return 0;
776}
777
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	/* For a VF, program only its single transparent vlan tag */
	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vids configured: enable vlan promiscuous instead */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
816
Jiri Pirko8e586132011-12-08 19:52:37 -0500817static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700818{
819 struct be_adapter *adapter = netdev_priv(netdev);
820
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000821 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000822 if (!be_physfn(adapter))
Jiri Pirko8e586132011-12-08 19:52:37 -0500823 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000824
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000826 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000827 be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500828
829 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700830}
831
Jiri Pirko8e586132011-12-08 19:52:37 -0500832static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833{
834 struct be_adapter *adapter = netdev_priv(netdev);
835
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000836 adapter->vlans_added--;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000837
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000838 if (!be_physfn(adapter))
Jiri Pirko8e586132011-12-08 19:52:37 -0500839 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000840
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000842 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000843 be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500844
845 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846}
847
/* netdev ->ndo_set_rx_mode handler: program the HW rx filter to match the
 * netdev's promiscuous/allmulti flags and its multicast list.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the vlan filters skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
878
/* netdev ->ndo_set_vf_mac handler: program a new MAC address for VF 'vf'.
 * Returns -EPERM when SR-IOV is disabled, -EINVAL for a bad MAC or VF
 * index, otherwise the firmware command status. On success the new MAC
 * is cached in the VF's config.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* Non-Lancer: delete the old pmac entry, then add the new */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		/* NOTE(review): the pmac_del status above is overwritten
		 * here, so a failed delete is silently ignored — confirm
		 * whether that is intended.
		 */
		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
909
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000910static int be_get_vf_config(struct net_device *netdev, int vf,
911 struct ifla_vf_info *vi)
912{
913 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000914 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000915
Sathya Perla11ac75e2011-12-13 00:58:50 +0000916 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000917 return -EPERM;
918
Sathya Perla11ac75e2011-12-13 00:58:50 +0000919 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000920 return -EINVAL;
921
922 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000923 vi->tx_rate = vf_cfg->tx_rate;
924 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000925 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000926 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000927
928 return 0;
929}
930
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000931static int be_set_vf_vlan(struct net_device *netdev,
932 int vf, u16 vlan, u8 qos)
933{
934 struct be_adapter *adapter = netdev_priv(netdev);
935 int status = 0;
936
Sathya Perla11ac75e2011-12-13 00:58:50 +0000937 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000938 return -EPERM;
939
Sathya Perla11ac75e2011-12-13 00:58:50 +0000940 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000941 return -EINVAL;
942
943 if (vlan) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000944 adapter->vf_cfg[vf].vlan_tag = vlan;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000945 adapter->vlans_added++;
946 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000947 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000948 adapter->vlans_added--;
949 }
950
951 status = be_vid_config(adapter, true, vf);
952
953 if (status)
954 dev_info(&adapter->pdev->dev,
955 "VLAN %d config on VF %d failed\n", vlan, vf);
956 return status;
957}
958
/* netdev ->ndo_set_vf_tx_rate handler: set the TX rate of VF 'vf'.
 * The requested rate is clamped to 10000 and handed to FW as rate / 10.
 */
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || rate < 0)
		return -EINVAL;

	/* Clamp to the maximum the FW command accepts */
	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
982
/* Adaptive interrupt coalescing: at most once a second, derive a new EQ
 * delay from the measured rx packet rate and program it into the EQ if it
 * changed. No-op unless adaptive coalescing (AIC) is enabled for the EQ.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the packet counter consistently w.r.t. the stats writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkt rate to an EQ delay and clamp to the EQ's [min, max];
	 * very low values (< 10) fall back to no coalescing.
	 */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
1026
/* Account one rx completion in the per-rx-queue stats, under the
 * u64_stats sync so 64-bit counters read consistently on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1042
Sathya Perla2e588f82011-03-11 02:49:26 +00001043static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001044{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001045 /* L4 checksum is not reliable for non TCP/UDP packets.
1046 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001047 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1048 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001049}
1050
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001051static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001052get_rx_page_info(struct be_adapter *adapter,
1053 struct be_rx_obj *rxo,
1054 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001055{
1056 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001057 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058
Sathya Perla3abcded2010-10-03 22:12:27 -07001059 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001060 BUG_ON(!rx_page_info->page);
1061
Ajit Khaparde205859a2010-02-09 01:34:21 +00001062 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001063 dma_unmap_page(&adapter->pdev->dev,
1064 dma_unmap_addr(rx_page_info, bus),
1065 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001066 rx_page_info->last_page_user = false;
1067 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001068
1069 atomic_dec(&rxq->used);
1070 return rx_page_info;
1071}
1072
/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	/* Release every fragment of the completion without building an skb */
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
1089
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp. The first BE_HDR_LEN bytes are copied into the skb's
 * linear area; the remainder of each fragment is attached as page frags,
 * with consecutive frags from the same physical page coalesced into one
 * frag slot. Consumed page_info entries have their page pointer cleared.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Rest of the first fragment becomes page frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* mark this rx buffer as consumed */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;	/* mark this rx buffer as consumed */
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1166
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the rx fragments, set checksum/vlan/rxhash
 * metadata and hand it to the stack. On skb-allocation failure the
 * completion's buffers are discarded and a drop is counted.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Trust the HW checksum only when csum_passed() says it is valid */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1199
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the rx page fragments directly to a napi-provided skb (frags
 * from the same physical page are coalesced into one slot), set vlan and
 * rxhash metadata, and feed it to the GRO engine. If no skb is available
 * the completion's buffers are discarded.
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1255
Sathya Perla2e588f82011-03-11 02:49:26 +00001256static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1257 struct be_eth_rx_compl *compl,
1258 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001259{
Sathya Perla2e588f82011-03-11 02:49:26 +00001260 rxcp->pkt_size =
1261 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1262 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1263 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1264 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001265 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001266 rxcp->ip_csum =
1267 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1268 rxcp->l4_csum =
1269 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1270 rxcp->ipv6 =
1271 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1272 rxcp->rxq_idx =
1273 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1274 rxcp->num_rcvd =
1275 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1276 rxcp->pkt_type =
1277 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001278 rxcp->rss_hash =
1279 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001280 if (rxcp->vlanf) {
1281 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001282 compl);
1283 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1284 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001285 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001286 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001287}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001288
Sathya Perla2e588f82011-03-11 02:49:26 +00001289static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1290 struct be_eth_rx_compl *compl,
1291 struct be_rx_compl_info *rxcp)
1292{
1293 rxcp->pkt_size =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1295 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1296 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1297 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001298 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001299 rxcp->ip_csum =
1300 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1301 rxcp->l4_csum =
1302 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1303 rxcp->ipv6 =
1304 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1305 rxcp->rxq_idx =
1306 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1307 rxcp->num_rcvd =
1308 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1309 rxcp->pkt_type =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001311 rxcp->rss_hash =
1312 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001313 if (rxcp->vlanf) {
1314 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001315 compl);
1316 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1317 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001318 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001319 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001320}
1321
/* Fetch and parse the next valid RX completion for this rx object.
 *
 * Returns a pointer to rxo->rxcp (reused for every completion — valid only
 * until the next call for this rxo), or NULL if no new completion is posted.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read above before reading the rest of the
	 * DMA'ed completion entry below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 native mode uses the v1 descriptor layout, else v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* vlan_tag needs a byte swap except on Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the vlan from the stack when it is the port's pvid
		 * and not a vlan the driver itself configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1361
Eric Dumazet1829b082011-03-01 05:48:12 +00001362static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001365
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001367 gfp |= __GFP_COMP;
1368 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001369}
1370
1371/*
1372 * Allocate a page, split it to fragments of size rx_frag_size and post as
1373 * receive buffers to BE
1374 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001375static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376{
Sathya Perla3abcded2010-10-03 22:12:27 -07001377 struct be_adapter *adapter = rxo->adapter;
1378 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001379 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001380 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381 struct page *pagep = NULL;
1382 struct be_eth_rx_d *rxd;
1383 u64 page_dmaaddr = 0, frag_dmaaddr;
1384 u32 posted, page_offset = 0;
1385
Sathya Perla3abcded2010-10-03 22:12:27 -07001386 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1388 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001389 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001391 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392 break;
1393 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001394 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1395 0, adapter->big_page_size,
1396 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 page_info->page_offset = 0;
1398 } else {
1399 get_page(pagep);
1400 page_info->page_offset = page_offset + rx_frag_size;
1401 }
1402 page_offset = page_info->page_offset;
1403 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001404 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001405 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1406
1407 rxd = queue_head_node(rxq);
1408 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1409 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410
1411 /* Any space left in the current big page for another frag? */
1412 if ((page_offset + rx_frag_size + rx_frag_size) >
1413 adapter->big_page_size) {
1414 pagep = NULL;
1415 page_info->last_page_user = true;
1416 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001417
1418 prev_page_info = page_info;
1419 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001420 page_info = &page_info_tbl[rxq->head];
1421 }
1422 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001423 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001424
1425 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001426 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001427 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001428 } else if (atomic_read(&rxq->used) == 0) {
1429 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001430 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001431 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432}
1433
/* Pop the next valid TX completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped to cpu order and its valid bit is cleared so
 * it will not be seen again.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	/* valid == 0: hw has not written this entry yet */
	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read above before reading the entry body */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Mark consumed so the ring slot reads as invalid next pass */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1449
/* Reclaim one completed TX skb: unmap its WRBs from txq->tail through
 * @last_index (inclusive) and free the skb.
 *
 * Returns the number of WRBs consumed (header WRB included) so the caller
 * can decrement txq->used accordingly.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data WRB may carry the skb's linear part;
		 * unmap it as a header only once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1481
/* Pop the next posted entry from the event queue, or NULL if none.
 * eqe->evt is converted to cpu order; callers clear eqe->evt to mark the
 * slot consumed (see event_handle/be_eq_clean).
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	/* evt == 0: hw has not written this entry yet */
	if (!eqe->evt)
		return NULL;

	/* Order the evt read above before consuming the entry below */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1494
1495static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001496 struct be_eq_obj *eq_obj,
1497 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001498{
1499 struct be_eq_entry *eqe;
1500 u16 num = 0;
1501
1502 while ((eqe = event_get(eq_obj)) != NULL) {
1503 eqe->evt = 0;
1504 num++;
1505 }
1506
1507 /* Deal with any spurious interrupts that come
1508 * without events
1509 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001510 if (!num)
1511 rearm = true;
1512
1513 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001514 if (num)
1515 napi_schedule(&eq_obj->napi);
1516
1517 return num;
1518}
1519
1520/* Just read and notify events without processing them.
1521 * Used at the time of destroying event queues */
1522static void be_eq_clean(struct be_adapter *adapter,
1523 struct be_eq_obj *eq_obj)
1524{
1525 struct be_eq_entry *eqe;
1526 u16 num = 0;
1527
1528 while ((eqe = event_get(eq_obj)) != NULL) {
1529 eqe->evt = 0;
1530 num++;
1531 }
1532
1533 if (num)
1534 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1535}
1536
/* Flush an RX queue: discard any pending completions, then release every
 * posted-but-unused RX buffer page, and reset the ring indices.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): loop termination relies on get_rx_page_info()
	 * decrementing rxq->used per entry — confirm against its definition */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1561
/* Drain a TX queue before teardown: reap completions for up to ~200ms,
 * then force-free any posted skbs whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch and retire its WRBs */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		/* Done, or timed out after ~200 x 1ms delays */
		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1609
Sathya Perla5fb379e2009-06-18 00:02:59 +00001610static void be_mcc_queues_destroy(struct be_adapter *adapter)
1611{
1612 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001613
Sathya Perla8788fdc2009-07-27 22:52:03 +00001614 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001615 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001616 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001617 be_queue_free(adapter, q);
1618
Sathya Perla8788fdc2009-07-27 22:52:03 +00001619 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001620 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001621 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001622 be_queue_free(adapter, q);
1623}
1624
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue (on the TX EQ) and then the MCC queue
 * itself. Returns 0 on success, -1 on failure (everything created so far
 * is torn down via the goto chain).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1660
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001661static void be_tx_queues_destroy(struct be_adapter *adapter)
1662{
1663 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001664 struct be_tx_obj *txo;
1665 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666
Sathya Perla3c8def92011-06-12 20:01:58 +00001667 for_all_tx_queues(adapter, txo, i) {
1668 q = &txo->q;
1669 if (q->created)
1670 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1671 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001672
Sathya Perla3c8def92011-06-12 20:01:58 +00001673 q = &txo->cq;
1674 if (q->created)
1675 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1676 be_queue_free(adapter, q);
1677 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678
Sathya Perla859b1e42009-08-10 03:43:51 +00001679 /* Clear any residual events */
1680 be_eq_clean(adapter, &adapter->tx_eq);
1681
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 q = &adapter->tx_eq.q;
1683 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001684 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685 be_queue_free(adapter, q);
1686}
1687
Sathya Perladafc0fe2011-10-24 02:45:02 +00001688static int be_num_txqs_want(struct be_adapter *adapter)
1689{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001690 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001691 lancer_chip(adapter) || !be_physfn(adapter) ||
1692 adapter->generation == BE_GEN2)
1693 return 1;
1694 else
1695 return MAX_TX_QS;
1696}
1697
/* One TX event queue is shared by all TX compl qs */
/* Create the shared TX EQ, then a CQ and a TX queue per tx object.
 * Returns 0 on success, -1 on failure (partial creations are torn down).
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	/* Fixed EQ delay of 96 for TX; adaptive coalescing disabled */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	/* Record this EQ's bit position for INTx ISR decoding */
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	/* Tears down everything created above, including the EQ */
	be_tx_queues_destroy(adapter);
	return -1;
}
1747
1748static void be_rx_queues_destroy(struct be_adapter *adapter)
1749{
1750 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001751 struct be_rx_obj *rxo;
1752 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753
Sathya Perla3abcded2010-10-03 22:12:27 -07001754 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001755 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001756
Sathya Perla3abcded2010-10-03 22:12:27 -07001757 q = &rxo->cq;
1758 if (q->created)
1759 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1760 be_queue_free(adapter, q);
1761
Sathya Perla3abcded2010-10-03 22:12:27 -07001762 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001763 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001764 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001765 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001766 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767}
1768
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001769static u32 be_num_rxqs_want(struct be_adapter *adapter)
1770{
Sathya Perlac814fd32011-06-26 20:41:25 +00001771 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla11ac75e2011-12-13 00:58:50 +00001772 !sriov_enabled(adapter) && be_physfn(adapter) &&
1773 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001774 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1775 } else {
1776 dev_warn(&adapter->pdev->dev,
1777 "No support for multiple RX queues\n");
1778 return 1;
1779 }
1780}
1781
/* Create per-RX-object EQ, CQ and RX ring memory. The RX queue itself is
 * only allocated here; it is created on the card in be_open().
 * Returns 0 on success, -1 on failure (partial creations torn down).
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* Cap RX queues by available MSI-X vectors (one vector is kept for
	 * TX/MCC); with INTx only a single RX queue is used */
	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		/* Record this EQ's bit position for INTx ISR decoding */
		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001839
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001840static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001841{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001842 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1843 if (!eqe->evt)
1844 return false;
1845 else
1846 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001847}
1848
/* Legacy INTx interrupt handler. Returns IRQ_NONE when the interrupt was
 * not raised by this device (shared-IRQ case), IRQ_HANDLED otherwise.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		/* Lancer: no ISR register is read; peek each EQ directly */
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		/* No events on any EQ: not our interrupt */
		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		/* BE2/BE3: read the CEV ISR bank covering our EQ ids */
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		/* Each EQ's bit in the ISR is indexed by its eq_idx,
		 * assigned at queue-creation time */
		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1883
1884static irqreturn_t be_msix_rx(int irq, void *dev)
1885{
Sathya Perla3abcded2010-10-03 22:12:27 -07001886 struct be_rx_obj *rxo = dev;
1887 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001888
Sathya Perla3c8def92011-06-12 20:01:58 +00001889 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001890
1891 return IRQ_HANDLED;
1892}
1893
Sathya Perla5fb379e2009-06-18 00:02:59 +00001894static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001895{
1896 struct be_adapter *adapter = dev;
1897
Sathya Perla3c8def92011-06-12 20:01:58 +00001898 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001899
1900 return IRQ_HANDLED;
1901}
1902
Sathya Perla2e588f82011-03-11 02:49:26 +00001903static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904{
Sathya Perla2e588f82011-03-11 02:49:26 +00001905 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001906}
1907
/* NAPI poll handler for one RX queue.
 * Consumes up to @budget completions, dropping flush/invalid/mis-filtered
 * completions, and hands valid frames to the GRO or non-GRO RX path.
 * Re-posts RX buffers when the ring runs low and re-arms the CQ only
 * when all work was consumed (standard NAPI contract).
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	/* ack the processed entries without re-arming the CQ yet */
	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
1964
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 * Always returns 1 (full budget) and completes NAPI unconditionally,
 * since all pending work was drained above.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	/* Reap every TX completion on every TX queue */
	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	/* Process management-channel completions on the same EQ */
	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, true, 0);

		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
	}

	/* re-arm the event queue */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
2027
/* Detect an Unrecoverable Error (UE) in the card and dump diagnostics.
 * Lancer chips report errors via the SLIPORT status/error registers in
 * the doorbell BAR; BEx chips report via the PCICFG UE status words,
 * filtered by the corresponding mask words.
 * On first detection the adapter is flagged dead (ue_detected/eeh_err)
 * and each set UE bit is logged by name; later calls are no-ops.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* already flagged or in EEH recovery: nothing more to report */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* masked bits are not considered errors; drop them */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* log every set bit by its descriptive name */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2091
Sathya Perla8d56ff12009-11-22 22:02:26 +00002092static void be_msix_disable(struct be_adapter *adapter)
2093{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002094 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002095 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002096 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002097 }
2098}
2099
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002100static void be_msix_enable(struct be_adapter *adapter)
2101{
Sathya Perla3abcded2010-10-03 22:12:27 -07002102#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002103 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002104
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002105 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002106
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002107 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002108 adapter->msix_entries[i].entry = i;
2109
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002110 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002111 if (status == 0) {
2112 goto done;
2113 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002114 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002115 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002116 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002117 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002118 }
2119 return;
2120done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002121 adapter->num_msix_vec = num_vec;
2122 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002123}
2124
/* Enable SR-IOV (PF only, CONFIG_PCI_IOV builds) and allocate per-VF
 * config. The requested VF count (module param num_vfs) is clamped to
 * the device's advertised PCI_SRIOV_TOTAL_VF.
 * Returns 0 on success or when SR-IOV is not applicable; -ENOMEM if the
 * vf_cfg array cannot be allocated.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* size by the clamped count actually enabled, not by
			 * the raw module parameter (which may be larger) */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2160
/* Disable SR-IOV and free the per-VF config array allocated by
 * be_sriov_enable() (CONFIG_PCI_IOV builds only).
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}
2171
/* Map an event-queue object to its assigned MSI-x vector number */
static inline int be_msix_vec_get(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}
2177
2178static int be_request_irq(struct be_adapter *adapter,
2179 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002180 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002181{
2182 struct net_device *netdev = adapter->netdev;
2183 int vec;
2184
2185 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002186 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002187 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002188}
2189
/* Free the MSI-x vector registered for @eq_obj with the given context */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2196
/* Register MSI-x handlers: one vector for TX/MCC and one per RX queue.
 * On failure, unwinds the vectors registered so far and disables MSI-x
 * entirely (the caller then falls back to INTx where supported).
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* free only the RX vectors registered before the failure (i-1..0) */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2230
/* Register the adapter's interrupt handling.
 * Prefers MSI-x when vectors were enabled; on MSI-x failure the PF falls
 * back to shared INTx, while a VF (which has no INTx support) returns
 * the MSI-x error. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2258
/* Free whichever IRQs be_irq_register() set up — the shared INTx line or
 * the per-EQ MSI-x vectors — and clear isr_registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2283
/* Destroy the created RX rings and drain residual DMA/completions/events.
 * The 1ms delay after rxq destroy gives the HW time to finish in-flight
 * DMA and post the flush completion before buffers are reclaimed.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2308
/* ndo_stop handler: quiesce the interface.
 * Teardown order is deliberate: stop async MCC, mask HW interrupts
 * (BEx only), disable NAPI, notify CQs un-armed on Lancer, synchronize
 * and free IRQs, reclaim pending TX completions, then clear RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	/* Lancer has no be_intr_set-style global interrupt control */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* notify all CQs with arm=false so no new events are raised */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* wait for any in-flight IRQ handlers before freeing the IRQs */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2357
/* Create the RX rings, program the RSS indirection table when multiple
 * RX queues exist, then post the initial RX buffers and enable NAPI.
 * Queue 0 is created as the default (non-RSS) queue; queues 1..n-1 are
 * created with RSS enabled and their rss_ids fill the 128-entry table
 * round-robin.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				adapter->if_handle,
				(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* cycle the RSS queues' ids across all 128 table slots */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2394
/* ndo_open handler: bring the interface up.
 * Order: set up RX queues -> enable TX NAPI -> register IRQs -> enable
 * HW interrupts (BEx only) -> arm the event/completion queues -> enable
 * async MCC processing. On any failure, be_close() undoes the partial
 * bring-up and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2428
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002429static int be_setup_wol(struct be_adapter *adapter, bool enable)
2430{
2431 struct be_dma_mem cmd;
2432 int status = 0;
2433 u8 mac[ETH_ALEN];
2434
2435 memset(mac, 0, ETH_ALEN);
2436
2437 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002438 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2439 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002440 if (cmd.va == NULL)
2441 return -1;
2442 memset(cmd.va, 0, cmd.size);
2443
2444 if (enable) {
2445 status = pci_write_config_dword(adapter->pdev,
2446 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2447 if (status) {
2448 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002449 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002450 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2451 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002452 return status;
2453 }
2454 status = be_cmd_enable_magic_wol(adapter,
2455 adapter->netdev->dev_addr, &cmd);
2456 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2457 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2458 } else {
2459 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2460 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2461 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2462 }
2463
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002464 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002465 return status;
2466}
2467
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the mac-list cmd; BEx adds a
		 * pmac on the VF's interface */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* NOTE(review): the loop continues past a failure, so the
		 * returned status reflects only the last VF attempted */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed MAC + 1 (last octet increments) */
		mac[5] += 1;
	}
	return status;
}
2502
/* Per-VF teardown: remove the MAC programmed for each VF (mac-list cmd on
 * Lancer, pmac delete on BEx) and destroy the VF's interface.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2518
/* Teardown counterpart of be_setup(): release VF resources first, then
 * destroy the interface and the MCC/RX/TX queues, and finally tell the
 * FW that no more cmds will be issued. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2534
/* Seed every VF's config with -1 sentinels for if_handle/pmac_id,
 * i.e. "not yet created".
 */
static void be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
}
2545
/* Provision the enabled VFs: create an interface for each (untagged +
 * broadcast + multicast), program their MAC addresses, then query each
 * VF's link speed to seed its tx_rate.
 * Returns 0 on success or the first failing cmd status (no unwind here;
 * be_vf_clear() handles cleanup).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  vf + 1);
		if (status)
			goto err;
		/* NOTE(review): lnk_speed*10 presumably converts the queried
		 * unit to the tx_rate unit — confirm against be_cmds.h */
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2579
Sathya Perla30128032011-11-10 19:17:57 +00002580static void be_setup_init(struct be_adapter *adapter)
2581{
2582 adapter->vlan_prio_bmap = 0xff;
2583 adapter->link_speed = -1;
2584 adapter->if_handle = -1;
2585 adapter->be3_native = false;
2586 adapter->promiscuous = false;
2587 adapter->eq_next_idx = 0;
2588}
2589
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002590static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2591{
2592 u32 pmac_id;
2593 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2594 if (status != 0)
2595 goto do_none;
2596 status = be_cmd_mac_addr_query(adapter, mac,
2597 MAC_ADDRESS_TYPE_NETWORK,
2598 false, adapter->if_handle, pmac_id);
2599 if (status != 0)
2600 goto do_none;
2601 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2602 &adapter->pmac_id, 0);
2603do_none:
2604 return status;
2605}
2606
Sathya Perla5fb379e2009-06-18 00:02:59 +00002607static int be_setup(struct be_adapter *adapter)
2608{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002609 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002610 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002611 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002612 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002613 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002614 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002615
Sathya Perla30128032011-11-10 19:17:57 +00002616 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002617
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002618 be_cmd_req_native_mode(adapter);
2619
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002620 status = be_tx_queues_create(adapter);
2621 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002622 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002623
2624 status = be_rx_queues_create(adapter);
2625 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002626 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002627
Sathya Perla5fb379e2009-06-18 00:02:59 +00002628 status = be_mcc_queues_create(adapter);
2629 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002630 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002631
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002632 memset(mac, 0, ETH_ALEN);
2633 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002634 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002635 if (status)
2636 return status;
2637 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2638 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2639
2640 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2641 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2642 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002643 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2644
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002645 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2646 cap_flags |= BE_IF_FLAGS_RSS;
2647 en_flags |= BE_IF_FLAGS_RSS;
2648 }
2649 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2650 netdev->dev_addr, &adapter->if_handle,
2651 &adapter->pmac_id, 0);
2652 if (status != 0)
2653 goto err;
2654
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002655 for_all_tx_queues(adapter, txo, i) {
2656 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2657 if (status)
2658 goto err;
2659 }
2660
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002661 /* The VF's permanent mac queried from card is incorrect.
2662 * For BEx: Query the mac configued by the PF using if_handle
2663 * For Lancer: Get and use mac_list to obtain mac address.
2664 */
2665 if (!be_physfn(adapter)) {
2666 if (lancer_chip(adapter))
2667 status = be_configure_mac_from_list(adapter, mac);
2668 else
2669 status = be_cmd_mac_addr_query(adapter, mac,
2670 MAC_ADDRESS_TYPE_NETWORK, false,
2671 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002672 if (!status) {
2673 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2674 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2675 }
2676 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002677
Sathya Perla04b71172011-09-27 13:30:27 -04002678 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002679
Sathya Perlaa54769f2011-10-24 02:45:00 +00002680 status = be_vid_config(adapter, false, 0);
2681 if (status)
2682 goto err;
2683
2684 be_set_rx_mode(adapter->netdev);
2685
2686 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002687 /* For Lancer: It is legal for this cmd to fail on VF */
2688 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002689 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002690
Sathya Perlaa54769f2011-10-24 02:45:00 +00002691 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2692 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2693 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002694 /* For Lancer: It is legal for this cmd to fail on VF */
2695 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002696 goto err;
2697 }
2698
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002699 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002700
Sathya Perla11ac75e2011-12-13 00:58:50 +00002701 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002702 status = be_vf_setup(adapter);
2703 if (status)
2704 goto err;
2705 }
2706
2707 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002708err:
2709 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002710 return status;
2711}
2712
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-mode servicing of all event queues (netconsole/kgdboe); runs with
 * interrupts unavailable, so events are reaped directly.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rx_obj;
	int idx;

	/* Service the TX/MCC event queue first, then each RX event queue */
	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rx_obj, idx)
		event_handle(adapter, &rx_obj->rx_eq, true);
}
#endif
2725
Ajit Khaparde84517482009-09-04 03:12:16 +00002726#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002727static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002728 const u8 *p, u32 img_start, int image_size,
2729 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002730{
2731 u32 crc_offset;
2732 u8 flashed_crc[4];
2733 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002734
2735 crc_offset = hdr_size + img_start + image_size - 4;
2736
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002737 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002738
2739 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002740 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002741 if (status) {
2742 dev_err(&adapter->pdev->dev,
2743 "could not get crc from flash, not flashing redboot\n");
2744 return false;
2745 }
2746
2747 /*update redboot only if crc does not match*/
2748 if (!memcmp(flashed_crc, p, 4))
2749 return false;
2750 else
2751 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002752}
2753
Sathya Perla306f1342011-08-02 19:57:45 +00002754static bool phy_flashing_required(struct be_adapter *adapter)
2755{
2756 int status = 0;
2757 struct be_phy_info phy_info;
2758
2759 status = be_cmd_get_phy_info(adapter, &phy_info);
2760 if (status)
2761 return false;
2762 if ((phy_info.phy_type == TN_8022) &&
2763 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2764 return true;
2765 }
2766 return false;
2767}
2768
/* Flash each firmware component of a BE2/BE3 UFI image.
 * The per-generation tables below give, for each component, its offset
 * within the UFI payload, the flash operation type, and its maximum
 * size.  Each selected component is streamed to the controller in 32KB
 * chunks through the flash_cmd DMA buffer: intermediate chunks use a
 * SAVE op, and the final chunk uses a FLASH op that commits the write.
 * @num_of_images: number of image_hdr entries preceding gen3 payloads.
 * Returns 0 on success, -1 on a malformed file or flash failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Component layout for BE3 (gen3) controllers */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	/* Component layout for BE2 (gen2) controllers (no NCSI/PHY images) */
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI firmware requires on-card FW 3.102.148.0 or newer */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY firmware applies only to specific external PHYs */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Skip redboot unless its CRC differs from what's in flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* Point p at this component's payload within the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* Reject files whose component would overrun the file end */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* The last chunk triggers the actual FLASH commit;
			 * earlier chunks are only SAVEd controller-side.
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW lacking PHY-flash support rejects the
				 * op; that is not fatal for the download.
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2885
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002886static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2887{
2888 if (fhdr == NULL)
2889 return 0;
2890 if (fhdr->build[0] == '3')
2891 return BE_GEN3;
2892 else if (fhdr->build[0] == '2')
2893 return BE_GEN2;
2894 else
2895 return 0;
2896}
2897
/* Download a firmware image to a Lancer adapter using write_object
 * commands.  The image is streamed in 32KB chunks through a single DMA
 * buffer, then committed with a final zero-length write to the same
 * flash object.  Returns 0 on success or -EINVAL/-ENOMEM/command status
 * on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* Hardware requires the image length to be 4-byte aligned */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the command header plus a full data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Chunk data goes right after the command header in the buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* FW may accept less than chunk_size; advance by what stuck */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2976
/* Flash a BE2/BE3 UFI firmware file: verify that the file's generation
 * matches the adapter's, then flash the embedded images through
 * be_flash_data().  Gen3 files carry multiple image headers; only
 * entries with imageid == 1 are flashed.  Returns 0 on success.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the write_flashrom command + one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		/* Walk the image headers; flash only imageid 1 entries */
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3032
3033int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3034{
3035 const struct firmware *fw;
3036 int status;
3037
3038 if (!netif_running(adapter->netdev)) {
3039 dev_err(&adapter->pdev->dev,
3040 "Firmware load not allowed (interface is down)\n");
3041 return -1;
3042 }
3043
3044 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3045 if (status)
3046 goto fw_exit;
3047
3048 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3049
3050 if (lancer_chip(adapter))
3051 status = lancer_fw_download(adapter, fw);
3052 else
3053 status = be_fw_download(adapter, fw);
3054
Ajit Khaparde84517482009-09-04 03:12:16 +00003055fw_exit:
3056 release_firmware(fw);
3057 return status;
3058}
3059
/* Net-device callback table; the SR-IOV ndo_set_vf_* hooks operate on
 * VFs from the PF, and ndo_poll_controller is only compiled in when
 * netpoll support is configured.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3079
/* One-time netdev initialization: advertise offload features, install
 * the driver's ndo/ethtool ops and attach NAPI contexts for every RX
 * event queue plus the combined TX/MCC event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* User-togglable features: SG, TSO, checksum offloads, VLAN tx
	 * insertion; RX hashing is only useful with multiple RX rings.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-enabled set: everything above plus VLAN rx/filtering */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* Offloads also honored inside VLAN-tagged traffic */
	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per RX event queue ... */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	/* ... and one for the shared TX/MCC event queue */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
3113
3114static void be_unmap_pci_bars(struct be_adapter *adapter)
3115{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003116 if (adapter->csr)
3117 iounmap(adapter->csr);
3118 if (adapter->db)
3119 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003120}
3121
3122static int be_map_pci_bars(struct be_adapter *adapter)
3123{
3124 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003125 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003126
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003127 if (lancer_chip(adapter)) {
3128 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3129 pci_resource_len(adapter->pdev, 0));
3130 if (addr == NULL)
3131 return -ENOMEM;
3132 adapter->db = addr;
3133 return 0;
3134 }
3135
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003136 if (be_physfn(adapter)) {
3137 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3138 pci_resource_len(adapter->pdev, 2));
3139 if (addr == NULL)
3140 return -ENOMEM;
3141 adapter->csr = addr;
3142 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003143
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003144 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003145 db_reg = 4;
3146 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003147 if (be_physfn(adapter))
3148 db_reg = 4;
3149 else
3150 db_reg = 0;
3151 }
3152 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3153 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003154 if (addr == NULL)
3155 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003156 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003157
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003158 return 0;
3159pci_map_err:
3160 be_unmap_pci_bars(adapter);
3161 return -ENOMEM;
3162}
3163
3164
3165static void be_ctrl_cleanup(struct be_adapter *adapter)
3166{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003167 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003168
3169 be_unmap_pci_bars(adapter);
3170
3171 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003172 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3173 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003174
Sathya Perla5b8821b2011-08-02 19:57:44 +00003175 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003176 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003177 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3178 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003179}
3180
/* Map BARs and allocate the DMA memory used for driver<->firmware
 * communication: the 16-byte-aligned mailbox and the rx-filter command
 * buffer.  Also initializes the mailbox/MCC locks and saves PCI config
 * state (restored later during error recovery).  Partially-acquired
 * resources are released through the goto chain on failure.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox itself can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem points at the aligned region inside the allocation */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	/* Locks serializing mailbox, MCC queue and MCC CQ access */
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3233
3234static void be_stats_cleanup(struct be_adapter *adapter)
3235{
Sathya Perla3abcded2010-10-03 22:12:27 -07003236 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003237
3238 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003239 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3240 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003241}
3242
3243static int be_stats_init(struct be_adapter *adapter)
3244{
Sathya Perla3abcded2010-10-03 22:12:27 -07003245 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003246
Selvin Xavier005d5692011-05-16 07:36:35 +00003247 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003248 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003249 } else {
3250 if (lancer_chip(adapter))
3251 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3252 else
3253 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3254 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003255 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3256 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003257 if (cmd->va == NULL)
3258 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003259 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003260 return 0;
3261}
3262
/* PCI remove callback: tear down in reverse order of probe.  The worker
 * is cancelled first so no deferred work races with teardown, and the
 * netdev is freed only after every user of it has been shut down.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3290
Sathya Perla2243e2e2009-11-22 22:02:03 +00003291static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003292{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003293 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003294
Sathya Perla3abcded2010-10-03 22:12:27 -07003295 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3296 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003297 if (status)
3298 return status;
3299
Sathya Perla752961a2011-10-24 02:45:03 +00003300 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003301 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3302 else
3303 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3304
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003305 status = be_cmd_get_cntl_attributes(adapter);
3306 if (status)
3307 return status;
3308
Sathya Perla2243e2e2009-11-22 22:02:03 +00003309 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003310}
3311
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003312static int be_dev_family_check(struct be_adapter *adapter)
3313{
3314 struct pci_dev *pdev = adapter->pdev;
3315 u32 sli_intf = 0, if_type;
3316
3317 switch (pdev->device) {
3318 case BE_DEVICE_ID1:
3319 case OC_DEVICE_ID1:
3320 adapter->generation = BE_GEN2;
3321 break;
3322 case BE_DEVICE_ID2:
3323 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003324 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003325 adapter->generation = BE_GEN3;
3326 break;
3327 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003328 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003329 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3330 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3331 SLI_INTF_IF_TYPE_SHIFT;
3332
3333 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3334 if_type != 0x02) {
3335 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3336 return -EINVAL;
3337 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003338 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3339 SLI_INTF_FAMILY_SHIFT);
3340 adapter->generation = BE_GEN3;
3341 break;
3342 default:
3343 adapter->generation = 0;
3344 }
3345 return 0;
3346}
3347
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003348static int lancer_wait_ready(struct be_adapter *adapter)
3349{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003350#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003351 u32 sliport_status;
3352 int status = 0, i;
3353
3354 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3355 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3356 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3357 break;
3358
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003359 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003360 }
3361
3362 if (i == SLIPORT_READY_TIMEOUT)
3363 status = -1;
3364
3365 return status;
3366}
3367
3368static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3369{
3370 int status;
3371 u32 sliport_status, err, reset_needed;
3372 status = lancer_wait_ready(adapter);
3373 if (!status) {
3374 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3375 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3376 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3377 if (err && reset_needed) {
3378 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3379 adapter->db + SLIPORT_CONTROL_OFFSET);
3380
3381 /* check adapter has corrected the error */
3382 status = lancer_wait_ready(adapter);
3383 sliport_status = ioread32(adapter->db +
3384 SLIPORT_STATUS_OFFSET);
3385 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3386 SLIPORT_STATUS_RN_MASK);
3387 if (status || sliport_status)
3388 status = -1;
3389 } else if (err || reset_needed) {
3390 status = -1;
3391 }
3392 }
3393 return status;
3394}
3395
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003396static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3397{
3398 int status;
3399 u32 sliport_status;
3400
3401 if (adapter->eeh_err || adapter->ue_detected)
3402 return;
3403
3404 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3405
3406 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3407 dev_err(&adapter->pdev->dev,
3408 "Adapter in error state."
3409 "Trying to recover.\n");
3410
3411 status = lancer_test_and_set_rdy_state(adapter);
3412 if (status)
3413 goto err;
3414
3415 netif_device_detach(adapter->netdev);
3416
3417 if (netif_running(adapter->netdev))
3418 be_close(adapter->netdev);
3419
3420 be_clear(adapter);
3421
3422 adapter->fw_timeout = false;
3423
3424 status = be_setup(adapter);
3425 if (status)
3426 goto err;
3427
3428 if (netif_running(adapter->netdev)) {
3429 status = be_open(adapter->netdev);
3430 if (status)
3431 goto err;
3432 }
3433
3434 netif_device_attach(adapter->netdev);
3435
3436 dev_err(&adapter->pdev->dev,
3437 "Adapter error recovery succeeded\n");
3438 }
3439 return;
3440err:
3441 dev_err(&adapter->pdev->dev,
3442 "Adapter error recovery failed\n");
3443}
3444
3445static void be_worker(struct work_struct *work)
3446{
3447 struct be_adapter *adapter =
3448 container_of(work, struct be_adapter, work.work);
3449 struct be_rx_obj *rxo;
3450 int i;
3451
3452 if (lancer_chip(adapter))
3453 lancer_test_and_recover_fn_err(adapter);
3454
3455 be_detect_dump_ue(adapter);
3456
3457 /* when interrupts are not yet enabled, just reap any pending
3458 * mcc completions */
3459 if (!netif_running(adapter->netdev)) {
3460 int mcc_compl, status = 0;
3461
3462 mcc_compl = be_process_mcc(adapter, &status);
3463
3464 if (mcc_compl) {
3465 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3466 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3467 }
3468
3469 goto reschedule;
3470 }
3471
3472 if (!adapter->stats_cmd_sent) {
3473 if (lancer_chip(adapter))
3474 lancer_cmd_get_pport_stats(adapter,
3475 &adapter->stats_cmd);
3476 else
3477 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3478 }
3479
3480 for_all_rx_queues(adapter, rxo, i) {
3481 be_rx_eqd_update(adapter, rxo);
3482
3483 if (rxo->rx_post_starved) {
3484 rxo->rx_post_starved = false;
3485 be_post_rx_frags(rxo, GFP_KERNEL);
3486 }
3487 }
3488
3489reschedule:
3490 adapter->work_counter++;
3491 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3492}
3493
/* PCI probe: bring up one BE2/BE3/Lancer function.
 *
 * Initialization order is significant: PCI enable/regions -> netdev
 * alloc -> family check -> DMA mask -> SR-IOV -> mbox/ctrl init ->
 * f/w ready sync -> f/w init/reset -> stats -> config -> MSI-x ->
 * be_setup -> register_netdev. The error labels at the bottom unwind
 * these steps in exactly the reverse order.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter's private area lives inside the netdev allocation */
	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* Sets adapter->generation/sli_family from the PCI device id */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: make sure the SLI port is ready (resetting it if it
	 * flags a recoverable error) before any mailbox cmds are issued.
	 */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;	/* flow-control on by default */

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	/* start the periodic housekeeping worker */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

	/* Error unwind: each label undoes the steps above it, in reverse */
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3628
/* PM suspend: quiesce the interface, free all h/w resources and put
 * the device into the requested low-power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Stop the worker first so it cannot issue cmds mid-teardown */
	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);	/* arm wake-on-LAN */

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3652
3653static int be_resume(struct pci_dev *pdev)
3654{
3655 int status = 0;
3656 struct be_adapter *adapter = pci_get_drvdata(pdev);
3657 struct net_device *netdev = adapter->netdev;
3658
3659 netif_device_detach(netdev);
3660
3661 status = pci_enable_device(pdev);
3662 if (status)
3663 return status;
3664
3665 pci_set_power_state(pdev, 0);
3666 pci_restore_state(pdev);
3667
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003668 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003669 /* tell fw we're ready to fire cmds */
3670 status = be_cmd_fw_init(adapter);
3671 if (status)
3672 return status;
3673
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003674 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003675 if (netif_running(netdev)) {
3676 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003677 be_open(netdev);
3678 rtnl_unlock();
3679 }
3680 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003681
3682 if (adapter->wol)
3683 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003684
3685 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003686 return 0;
3687}
3688
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	/* Stop the worker before touching the device */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);	/* arm wake-on-LAN */

	/* Function-level reset stops any in-flight DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3710
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * and tell the EEH core whether a slot reset may recover it.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Checked elsewhere (e.g. Lancer recovery) to avoid issuing cmds
	 * to the errored device until be_eeh_reset() clears it.
	 */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3737
/* EEH callback: the slot has been reset. Re-enable the device and
 * verify the f/w comes back to a ready state via POST.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear all latched error/timeout flags before re-probing h/w */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3763
/* EEH callback: traffic may flow again. Re-initialize the f/w state,
 * rebuild resources and re-open the interface if it was running.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3793
/* PCI error-recovery (EEH) entry points */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3799
/* PCI driver registration table: life-cycle, PM and EEH callbacks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3810
3811static int __init be_init_module(void)
3812{
Joe Perches8e95a202009-12-03 07:58:21 +00003813 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3814 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003815 printk(KERN_WARNING DRV_NAME
3816 " : Module param rx_frag_size must be 2048/4096/8192."
3817 " Using 2048\n");
3818 rx_frag_size = 2048;
3819 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003820
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003821 return pci_register_driver(&be_driver);
3822}
3823module_init(be_init_module);
3824
/* Module unload: unbind the driver from all devices and deregister */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);