/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

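/* Alloc/free helpers for the DMA-coherent memory backing a queue (EQ, CQ,
 * RQ or TXQ): be_queue_alloc() sizes the ring as len * entry_size and
 * zeroes it; be_queue_free() releases the mapping only if one exists.
 */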
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

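/* Enable or disable host interrupts by toggling the HOSTINTR bit of the
 * membar interrupt-control register; skipped entirely once an EEH error
 * has been flagged, and a no-op if the bit already matches the request.
 */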
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

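/* Doorbell helpers: each composes a ring id and a posted/popped count and
 * writes it to the doorbell BAR. The wmb() in the RQ/TXQ variants orders
 * the descriptor updates in memory ahead of the doorbell write.
 */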
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

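/* ndo_set_mac_address handler. On a PF the old pmac entry is deleted and
 * the new MAC programmed via FW cmds; on a VF only netdev->dev_addr is
 * updated, since VF MACs are configured in hardware by the owning PF.
 */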
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

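/* FW reports stats in generation-specific layouts: v0 for BE2, v1 for BE3
 * and a per-port format for Lancer. The populate_*() routines below fold
 * each layout into the common struct be_drv_stats used by the driver.
 */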
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i)
		rx_stats(rxo)->rx_drops_no_frags =
			erx->rx_drops_no_fragments[rxo->q.id];
}

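/* ndo_get_stats64 handler. Per-queue counters are sampled inside the
 * u64_stats fetch/retry loop so that 64-bit values read consistently even
 * on 32-bit hosts, then accumulated into the caller-supplied stats.
 */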
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

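/* Fill the header WRB that leads the data WRBs of a transmit: LSO bits and
 * MSS for GSO skbs, TCP/UDP checksum-offload flags, and the VLAN tag with
 * its priority remapped when not present in the adapter's allowed bitmap.
 */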
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

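/* DMA-map the skb head and frags and chain them as WRBs behind the header
 * WRB. On a mapping failure the queue head is rewound and all mappings
 * made so far are undone via unmap_tx_frag() before returning 0.
 */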
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

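/* ndo_start_xmit handler: builds the WRB chain, stops the subqueue while
 * the ring may not fit another max-fragmented skb, and only then rings the
 * TX doorbell so queue-stop serializes with the resulting completions.
 */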
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

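/* ndo_set_multicast_list handler: enters or leaves promiscuous mode per
 * the netdev flags, falls back to multicast-promiscuous when more than
 * BE_MAX_MC addresses are configured, else programs the exact mc list.
 */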
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
			&adapter->mc_cmd_mem);
done:
	return;
}

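/* SR-IOV ndo handlers follow. Each is honoured only when SR-IOV is
 * enabled and validates the VF index (and value ranges) before issuing
 * the corresponding FW command on the VF's behalf.
 */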
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

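/* Adaptive interrupt coalescing: about once a second, compute the RX
 * packet rate, map it to an EQ delay clamped to [min_eqd, max_eqd] (with
 * rates low enough to yield eqd < 10 treated as 0, i.e. no delay), and
 * push the new value to hardware only when it changes.
 */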
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

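/* Look up the page_info of a completed RX frag. One mapped page may back
 * several frags, so the DMA mapping is torn down only when the frag marked
 * last_page_user is consumed; rxq->used drops once per frag either way.
 */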
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

Ajit Khaparde5be93b92009-07-21 12:36:19 -07001131/* Process the RX completion indicated by rxcp when GRO is enabled */
1132static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001133 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001134 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001135{
1136 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001137 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001138 struct be_queue_info *rxq = &rxo->q;
1139 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001140 u16 remaining, curr_frag_len;
1141 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001142
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001143 skb = napi_get_frags(&eq_obj->napi);
1144 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001145 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001146 return;
1147 }
1148
Sathya Perla2e588f82011-03-11 02:49:26 +00001149 remaining = rxcp->pkt_size;
1150 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1151 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152
1153 curr_frag_len = min(remaining, rx_frag_size);
1154
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001155 /* Coalesce all frags from the same physical page in one slot */
1156 if (i == 0 || page_info->page_offset == 0) {
1157 /* First frag or Fresh page */
1158 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001159 skb_shinfo(skb)->frags[j].page = page_info->page;
1160 skb_shinfo(skb)->frags[j].page_offset =
1161 page_info->page_offset;
1162 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001163 } else {
1164 put_page(page_info->page);
1165 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001166 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001167
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001169 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170 memset(page_info, 0, sizeof(*page_info));
1171 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001172 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001174 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001175 skb->len = rxcp->pkt_size;
1176 skb->data_len = rxcp->pkt_size;
1177 skb->truesize += rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001178 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001179 if (adapter->netdev->features & NETIF_F_RXHASH)
1180 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001181
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001182 if (unlikely(rxcp->vlanf))
1183 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1184
1185 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001186}
1187
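/* Transcribe a v1 (BE3 native mode) RX completion into the chip-agnostic
 * be_rx_compl_info, so the rest of the RX path need not know which
 * completion format the hw is using */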
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
		struct be_eth_rx_compl *compl,
		struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

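/* Transcribe a v0 (legacy) RX completion into the chip-agnostic
 * be_rx_compl_info */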
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
		struct be_eth_rx_compl *compl,
		struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

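/* Fetch the RX completion at the CQ tail, byte-swap it and parse it into
 * rxo->rxcp; returns NULL when no completion is pending */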
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards;
		 * ignore it if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

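/* __GFP_COMP makes an order > 0 allocation a compound page, so that a later
 * put_page() can free the whole high-order allocation as one unit */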
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

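/* Return the TX completion at the CQ tail, or NULL if none is pending.
 * The valid bit is cleared so the entry is consumed exactly once */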
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

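/* Unmap the wrbs of one completed TX request and free its skb; returns the
 * number of wrbs reaped, including the header wrb */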
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

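/* Drain all pending entries of an event queue, notify the hw and kick NAPI.
 * A spurious interrupt (no events) still forces an EQ re-arm */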
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

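/* Discard pending RX completions and free all posted RX buffers; used only
 * after the RXQ has been destroyed, so no new completions can arrive */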
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

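/* Reap in-flight TX completions (waiting up to 200ms for them to arrive),
 * then force-free any posted wrbs whose completions will never come */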
static void be_tx_compl_clean(struct be_adapter *adapter,
			      struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			  wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			  txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			   sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				   sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				   sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}

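/* Create an EQ/CQ pair per RX ring and allocate the RX rings themselves;
 * the RXQs are created on the hw later, in be_open() */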
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				 msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			 "Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			       (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

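/* GRO is attempted only for error-free TCP completions */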
static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
			    atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

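/* Read the UE (unrecoverable error) status registers and log every hw block
 * that reports an unmasked error */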
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
			      PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}
}

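/* Periodic (1s) housekeeping: UE detection, stats refresh, adaptive EQD
 * updates and replenishing of RX queues that starved for memory */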
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

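/* Request one MSI-X vector per desired RXQ plus one for TX/MCC; if the
 * kernel offers fewer, retry with the count it reports as available */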
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
					      PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
			  struct be_eq_obj *eq_obj,
			  void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
					qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		 "MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

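/* Destroy the RXQs on the hw and reclaim everything posted on them; a 1ms
 * grace period lets in-flight DMA and the flush completion arrive first */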
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

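/* Create the RXQs on the hw (RSS-enabled on all but the default queue),
 * program the RSS indirection table and post the initial RX buffers */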
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				       adapter->if_handle,
				       (i > 0) ? 1 : 0/* rss enable */,
				       &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
				       adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
						 adapter->tx_fc,
						 adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

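/* Enable/disable Wake-on-LAN; a zeroed MAC in the magic-wol command
 * disables it, and the PCI wake state is toggled to match */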
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002341static int be_setup_wol(struct be_adapter *adapter, bool enable)
2342{
2343 struct be_dma_mem cmd;
2344 int status = 0;
2345 u8 mac[ETH_ALEN];
2346
2347 memset(mac, 0, ETH_ALEN);
2348
2349 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002350 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2351 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002352 if (cmd.va == NULL)
2353 return -1;
2354 memset(cmd.va, 0, cmd.size);
2355
2356 if (enable) {
2357 status = pci_write_config_dword(adapter->pdev,
2358 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2359 if (status) {
2360 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002361 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002362 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2363 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002364 return status;
2365 }
2366 status = be_cmd_enable_magic_wol(adapter,
2367 adapter->netdev->dev_addr, &cmd);
2368 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2369 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2370 } else {
2371 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2372 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2373 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2374 }
2375
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002376 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002377 return status;
2378}
2379
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002380/*
2381 * Generate a seed MAC address from the PF MAC Address using jhash.
2382 * MAC Address for VFs are assigned incrementally starting from the seed.
2383 * These addresses are programmed in the ASIC by the PF and the VF driver
2384 * queries for the MAC address during its probe.
2385 */
2386static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2387{
2388 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002389 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002390 u8 mac[ETH_ALEN];
2391
2392 be_vf_eth_addr_generate(adapter, mac);
2393
2394 for (vf = 0; vf < num_vfs; vf++) {
2395 status = be_cmd_pmac_add(adapter, mac,
2396 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002397 &adapter->vf_cfg[vf].vf_pmac_id,
2398 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002399 if (status)
2400 dev_err(&adapter->pdev->dev,
2401 "Mac address add failed for VF %d\n", vf);
2402 else
2403 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2404
2405 mac[5] += 1;
2406 }
2407 return status;
2408}
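/*
 * Illustrative sketch only: the real be_vf_eth_addr_generate() is defined
 * earlier in this file. Per the comment above be_vf_eth_addr_config(), the
 * seed is derived by jhash-ing the PF MAC; the exact octet layout shown
 * here is an assumption, not necessarily the driver's actual scheme:
 *
 *	u32 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
 *	mac[5] = addr & 0xFF;
 *	mac[4] = (addr >> 8) & 0xFF;
 *	mac[3] = (addr >> 16) & 0xFF;
 */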
2409
2410static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2411{
2412 u32 vf;
2413
2414 for (vf = 0; vf < num_vfs; vf++) {
2415 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2416 be_cmd_pmac_del(adapter,
2417 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002418 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002419 }
2420}
2421
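/*
 * One-time creation of adapter resources: the PF interface (plus one
 * interface per VF when SR-IOV is enabled), then the TX, RX and MCC
 * queues. be_clear() tears these down again.
 */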
Sathya Perla5fb379e2009-06-18 00:02:59 +00002422static int be_setup(struct be_adapter *adapter)
2423{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002424 struct net_device *netdev = adapter->netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002425 u32 cap_flags, en_flags, vf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002426 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002427 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002428
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002429 be_cmd_req_native_mode(adapter);
2430
Padmanabh Ratnakarf21b5382011-03-07 03:09:36 +00002431 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2432 BE_IF_FLAGS_BROADCAST |
2433 BE_IF_FLAGS_MULTICAST;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002434
2435 if (be_physfn(adapter)) {
2436 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2437 BE_IF_FLAGS_PROMISCUOUS |
2438 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2439 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
Sathya Perla3abcded2010-10-03 22:12:27 -07002440
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002441 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002442 cap_flags |= BE_IF_FLAGS_RSS;
2443 en_flags |= BE_IF_FLAGS_RSS;
2444 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002445 }
Sathya Perla73d540f2009-10-14 20:20:42 +00002446
2447 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2448 netdev->dev_addr, false/* pmac_invalid */,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002449 &adapter->if_handle, &adapter->pmac_id, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002450 if (status != 0)
2451 goto do_none;
2452
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002453 if (be_physfn(adapter)) {
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002454 if (adapter->sriov_enabled) {
2455 while (vf < num_vfs) {
2456 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2457 BE_IF_FLAGS_BROADCAST;
2458 status = be_cmd_if_create(adapter, cap_flags,
2459 en_flags, mac, true,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002460 &adapter->vf_cfg[vf].vf_if_handle,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002461 NULL, vf+1);
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002462 if (status) {
2463 dev_err(&adapter->pdev->dev,
2464 "Interface Create failed for VF %d\n",
2465 vf);
2466 goto if_destroy;
2467 }
2468 adapter->vf_cfg[vf].vf_pmac_id =
2469 BE_INVALID_PMAC_ID;
2470 vf++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002471 }
Sarveshwar Bandi84e5b9f2010-05-27 16:28:15 -07002472 }
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002473 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002474 status = be_cmd_mac_addr_query(adapter, mac,
2475 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2476 if (!status) {
2477 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2478 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2479 }
2480 }
2481
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002482 status = be_tx_queues_create(adapter);
2483 if (status != 0)
2484 goto if_destroy;
2485
2486 status = be_rx_queues_create(adapter);
2487 if (status != 0)
2488 goto tx_qs_destroy;
2489
Sathya Perla2903dd62011-06-26 20:41:53 +00002490	/* Allow all priorities by default. A GRP5 async event may modify this */
2491 adapter->vlan_prio_bmap = 0xff;
2492
Sathya Perla5fb379e2009-06-18 00:02:59 +00002493 status = be_mcc_queues_create(adapter);
2494 if (status != 0)
2495 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002497 adapter->link_speed = -1;
2498
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499 return 0;
2500
Sathya Perla5fb379e2009-06-18 00:02:59 +00002501rx_qs_destroy:
2502 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002503tx_qs_destroy:
2504 be_tx_queues_destroy(adapter);
2505if_destroy:
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002506 if (be_physfn(adapter) && adapter->sriov_enabled)
2507 for (vf = 0; vf < num_vfs; vf++)
2508 if (adapter->vf_cfg[vf].vf_if_handle)
2509 be_cmd_if_destroy(adapter,
Ajit Khaparde658681f2011-02-11 13:34:46 +00002510 adapter->vf_cfg[vf].vf_if_handle,
2511 vf + 1);
2512 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002513do_none:
2514 return status;
2515}
2516
Sathya Perla5fb379e2009-06-18 00:02:59 +00002517static int be_clear(struct be_adapter *adapter)
2518{
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002519 int vf;
2520
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002521 if (be_physfn(adapter) && adapter->sriov_enabled)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002522 be_vf_eth_addr_rem(adapter);
2523
Sathya Perla1a8887d2009-08-17 00:58:41 +00002524 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002525 be_rx_queues_destroy(adapter);
2526 be_tx_queues_destroy(adapter);
Padmanabh Ratnakar1f5db832011-04-03 01:54:39 +00002527 adapter->eq_next_idx = 0;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002528
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002529 if (be_physfn(adapter) && adapter->sriov_enabled)
2530 for (vf = 0; vf < num_vfs; vf++)
2531 if (adapter->vf_cfg[vf].vf_if_handle)
2532 be_cmd_if_destroy(adapter,
2533 adapter->vf_cfg[vf].vf_if_handle,
2534 vf + 1);
2535
Ajit Khaparde658681f2011-02-11 13:34:46 +00002536 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002537
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002538 adapter->be3_native = 0;
2539
Sathya Perla2243e2e2009-11-22 22:02:03 +00002540 /* tell fw we're done with firing cmds */
2541 be_cmd_fw_clean(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002542 return 0;
2543}
2544
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002545
Ajit Khaparde84517482009-09-04 03:12:16 +00002546#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002547static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002548 const u8 *p, u32 img_start, int image_size,
2549 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002550{
2551 u32 crc_offset;
2552 u8 flashed_crc[4];
2553 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002554
2555 crc_offset = hdr_size + img_start + image_size - 4;
2556
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002557 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002558
2559 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002560 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002561 if (status) {
2562 dev_err(&adapter->pdev->dev,
2563 "could not get crc from flash, not flashing redboot\n");
2564 return false;
2565 }
2566
2567	/* update redboot only if CRC does not match */
2568 if (!memcmp(flashed_crc, p, 4))
2569 return false;
2570 else
2571 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002572}
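/*
 * Layout note (derived from the offsets used above): within the UFI file,
 * each flash component stores its CRC in its last 4 bytes, i.e. at offset
 * hdr_size + img_start + image_size - 4 from the start of the file; that
 * value is compared against the CRC read back from flash to decide whether
 * the redboot region actually needs re-flashing.
 */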
2573
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002574static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002575 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002576 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00002578{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002579 int status = 0, i, filehdr_size = 0;
2580 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002581 int num_bytes;
2582 const u8 *p = fw->data;
2583 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002584 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002585 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002586
Joe Perches215faf92010-12-21 02:16:10 -08002587 static const struct flash_comp gen3_flash_types[9] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002588 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2589 FLASH_IMAGE_MAX_SIZE_g3},
2590 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2591 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2592 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2593 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2594 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2595 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2596 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2597 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2598 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2599 FLASH_IMAGE_MAX_SIZE_g3},
2600 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2601 FLASH_IMAGE_MAX_SIZE_g3},
2602 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002603 FLASH_IMAGE_MAX_SIZE_g3},
2604 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2605 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002606 };
Joe Perches215faf92010-12-21 02:16:10 -08002607 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002608 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2609 FLASH_IMAGE_MAX_SIZE_g2},
2610 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2611 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2612 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2613 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2614 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2615 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2616 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2617 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2618 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2619 FLASH_IMAGE_MAX_SIZE_g2},
2620 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2621 FLASH_IMAGE_MAX_SIZE_g2},
2622 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2623 FLASH_IMAGE_MAX_SIZE_g2}
2624 };
2625
2626 if (adapter->generation == BE_GEN3) {
2627 pflashcomp = gen3_flash_types;
2628 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002629 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002630 } else {
2631 pflashcomp = gen2_flash_types;
2632 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002633 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002634 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002635 for (i = 0; i < num_comp; i++) {
2636 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2637 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2638 continue;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002639 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2640 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002641 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2642 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002643 continue;
2644 p = fw->data;
2645 p += filehdr_size + pflashcomp[i].offset
2646 + (num_of_images * sizeof(struct image_hdr));
2647 if (p + pflashcomp[i].size > fw->data + fw->size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002648 return -1;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002649 total_bytes = pflashcomp[i].size;
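		/* Stream this component in 32KB chunks: intermediate chunks
		 * are sent with FLASHROM_OPER_SAVE (staged by the adapter),
		 * and the final chunk uses FLASHROM_OPER_FLASH to perform
		 * the actual write.
		 */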
2650 while (total_bytes) {
2651 if (total_bytes > 32*1024)
2652 num_bytes = 32*1024;
2653 else
2654 num_bytes = total_bytes;
2655 total_bytes -= num_bytes;
Ajit Khaparde84517482009-09-04 03:12:16 +00002656
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002657 if (!total_bytes)
2658 flash_op = FLASHROM_OPER_FLASH;
2659 else
2660 flash_op = FLASHROM_OPER_SAVE;
2661 memcpy(req->params.data_buf, p, num_bytes);
2662 p += num_bytes;
2663 status = be_cmd_write_flashrom(adapter, flash_cmd,
2664 pflashcomp[i].optype, flash_op, num_bytes);
2665 if (status) {
2666 dev_err(&adapter->pdev->dev,
2667 "cmd to write to flash rom failed.\n");
2668 return -1;
2669 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002670 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002671 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002672 return 0;
2673}
2674
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002675static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2676{
2677 if (fhdr == NULL)
2678 return 0;
2679 if (fhdr->build[0] == '3')
2680 return BE_GEN3;
2681 else if (fhdr->build[0] == '2')
2682 return BE_GEN2;
2683 else
2684 return 0;
2685}
2686
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002687static int lancer_fw_download(struct be_adapter *adapter,
2688 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002689{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002690#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2691#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2692 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002693 const u8 *data_ptr = NULL;
2694 u8 *dest_image_ptr = NULL;
2695 size_t image_size = 0;
2696 u32 chunk_size = 0;
2697 u32 data_written = 0;
2698 u32 offset = 0;
2699 int status = 0;
2700 u8 add_status = 0;
2701
2702 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2703 dev_err(&adapter->pdev->dev,
2704 "FW Image not properly aligned. "
2705 "Length must be 4 byte aligned.\n");
2706 status = -EINVAL;
2707 goto lancer_fw_exit;
2708 }
2709
2710 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2711 + LANCER_FW_DOWNLOAD_CHUNK;
2712 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2713 &flash_cmd.dma, GFP_KERNEL);
2714 if (!flash_cmd.va) {
2715 status = -ENOMEM;
2716 dev_err(&adapter->pdev->dev,
2717 "Memory allocation failure while flashing\n");
2718 goto lancer_fw_exit;
2719 }
2720
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002721 dest_image_ptr = flash_cmd.va +
2722 sizeof(struct lancer_cmd_req_write_object);
2723 image_size = fw->size;
2724 data_ptr = fw->data;
2725
2726 while (image_size) {
2727 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2728
2729 /* Copy the image chunk content. */
2730 memcpy(dest_image_ptr, data_ptr, chunk_size);
2731
2732 status = lancer_cmd_write_object(adapter, &flash_cmd,
2733 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2734 &data_written, &add_status);
2735
2736 if (status)
2737 break;
2738
2739 offset += data_written;
2740 data_ptr += data_written;
2741 image_size -= data_written;
2742 }
2743
2744 if (!status) {
2745 /* Commit the FW written */
2746 status = lancer_cmd_write_object(adapter, &flash_cmd,
2747 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2748 &data_written, &add_status);
2749 }
2750
2751 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2752 flash_cmd.dma);
2753 if (status) {
2754 dev_err(&adapter->pdev->dev,
2755 "Firmware load error. "
2756 "Status code: 0x%x Additional Status: 0x%x\n",
2757 status, add_status);
2758 goto lancer_fw_exit;
2759 }
2760
2761 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2762lancer_fw_exit:
2763 return status;
2764}
2765
2766static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2767{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002768 struct flash_file_hdr_g2 *fhdr;
2769 struct flash_file_hdr_g3 *fhdr3;
2770 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002771 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002772 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002773 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002774
2775 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002776 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002777
Ajit Khaparde84517482009-09-04 03:12:16 +00002778 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002779 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2780 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002781 if (!flash_cmd.va) {
2782 status = -ENOMEM;
2783 dev_err(&adapter->pdev->dev,
2784 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002785 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002786 }
2787
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002788 if ((adapter->generation == BE_GEN3) &&
2789 (get_ufigen_type(fhdr) == BE_GEN3)) {
2790 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002791 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2792 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002793 img_hdr_ptr = (struct image_hdr *) (fw->data +
2794 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002795 i * sizeof(struct image_hdr)));
2796 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2797 status = be_flash_data(adapter, fw, &flash_cmd,
2798 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002799 }
2800 } else if ((adapter->generation == BE_GEN2) &&
2801 (get_ufigen_type(fhdr) == BE_GEN2)) {
2802 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2803 } else {
2804 dev_err(&adapter->pdev->dev,
2805 "UFI and Interface are not compatible for flashing\n");
2806 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002807 }
2808
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002809 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2810 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002811 if (status) {
2812 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002813 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002814 }
2815
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002816 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002817
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002818be_fw_exit:
2819 return status;
2820}
2821
2822int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2823{
2824 const struct firmware *fw;
2825 int status;
2826
2827 if (!netif_running(adapter->netdev)) {
2828 dev_err(&adapter->pdev->dev,
2829 "Firmware load not allowed (interface is down)\n");
2830 return -1;
2831 }
2832
2833 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2834 if (status)
2835 goto fw_exit;
2836
2837 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2838
2839 if (lancer_chip(adapter))
2840 status = lancer_fw_download(adapter, fw);
2841 else
2842 status = be_fw_download(adapter, fw);
2843
Ajit Khaparde84517482009-09-04 03:12:16 +00002844fw_exit:
2845 release_firmware(fw);
2846 return status;
2847}
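/*
 * be_load_fw() is normally reached via the ethtool flash_device hook
 * (e.g. "ethtool -f <iface> <ufi-file>"); the netif_running() check above
 * means the interface must be up before flashing is allowed.
 */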
2848
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002849static struct net_device_ops be_netdev_ops = {
2850 .ndo_open = be_open,
2851 .ndo_stop = be_close,
2852 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002853 .ndo_set_rx_mode = be_set_multicast_list,
2854 .ndo_set_mac_address = be_mac_addr_set,
2855 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00002856 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002857 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002858 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2859 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002860 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002861 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002862 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002863 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002864};
2865
2866static void be_netdev_init(struct net_device *netdev)
2867{
2868 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002869 struct be_rx_obj *rxo;
2870 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002871
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002872 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002873 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2874 NETIF_F_HW_VLAN_TX;
2875 if (be_multi_rxq(adapter))
2876 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002877
2878 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002879 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00002880
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07002881 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00002882 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00002883
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002884 netdev->flags |= IFF_MULTICAST;
2885
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002886 /* Default settings for Rx and Tx flow control */
2887 adapter->rx_fc = true;
2888 adapter->tx_fc = true;
2889
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002890 netif_set_gso_max_size(netdev, 65535);
2891
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002892 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2893
2894 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2895
Sathya Perla3abcded2010-10-03 22:12:27 -07002896 for_all_rx_queues(adapter, rxo, i)
2897 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2898 BE_NAPI_WEIGHT);
2899
Sathya Perla5fb379e2009-06-18 00:02:59 +00002900 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002901 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002902}
2903
2904static void be_unmap_pci_bars(struct be_adapter *adapter)
2905{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002906 if (adapter->csr)
2907 iounmap(adapter->csr);
2908 if (adapter->db)
2909 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002910 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002911 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002912}
2913
2914static int be_map_pci_bars(struct be_adapter *adapter)
2915{
2916 u8 __iomem *addr;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002917 int pcicfg_reg, db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002918
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002919 if (lancer_chip(adapter)) {
2920 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2921 pci_resource_len(adapter->pdev, 0));
2922 if (addr == NULL)
2923 return -ENOMEM;
2924 adapter->db = addr;
2925 return 0;
2926 }
2927
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002928 if (be_physfn(adapter)) {
2929 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2930 pci_resource_len(adapter->pdev, 2));
2931 if (addr == NULL)
2932 return -ENOMEM;
2933 adapter->csr = addr;
2934 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002935
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002936 if (adapter->generation == BE_GEN2) {
2937 pcicfg_reg = 1;
2938 db_reg = 4;
2939 } else {
2940 pcicfg_reg = 0;
2941 if (be_physfn(adapter))
2942 db_reg = 4;
2943 else
2944 db_reg = 0;
2945 }
2946 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2947 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002948 if (addr == NULL)
2949 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002950 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002951
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002952 if (be_physfn(adapter)) {
2953 addr = ioremap_nocache(
2954 pci_resource_start(adapter->pdev, pcicfg_reg),
2955 pci_resource_len(adapter->pdev, pcicfg_reg));
2956 if (addr == NULL)
2957 goto pci_map_err;
2958 adapter->pcicfg = addr;
2959 } else
2960 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002961
2962 return 0;
2963pci_map_err:
2964 be_unmap_pci_bars(adapter);
2965 return -ENOMEM;
2966}
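/*
 * Summary of the BAR usage implemented above (derived from this code, not
 * from hardware documentation):
 *   Lancer:     BAR 0 -> doorbells (the only BAR mapped here)
 *   BE2/BE3 PF: BAR 2 -> CSR, BAR 4 -> doorbells,
 *               pcicfg in BAR 1 (gen2) or BAR 0 (gen3)
 *   VF:         doorbells via db_reg above; pcicfg is not mapped but
 *               derived from the doorbell mapping at SRIOV_VF_PCICFG_OFFSET
 */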
2967
2968
2969static void be_ctrl_cleanup(struct be_adapter *adapter)
2970{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002971 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002972
2973 be_unmap_pci_bars(adapter);
2974
2975 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002976 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2977 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002978
2979 mem = &adapter->mc_cmd_mem;
2980 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002981 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2982 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002983}
2984
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002985static int be_ctrl_init(struct be_adapter *adapter)
2986{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002987 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2988 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002989 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002990 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002991
2992 status = be_map_pci_bars(adapter);
2993 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00002994 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002995
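	/* Over-allocate by 16 bytes so that PTR_ALIGN below can round the
	 * mailbox va/dma up to the 16-byte boundary the mailbox interface
	 * expects, while still leaving room for the whole struct.
	 */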
2996 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002997 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2998 mbox_mem_alloc->size,
2999 &mbox_mem_alloc->dma,
3000 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003001 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003002 status = -ENOMEM;
3003 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003004 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00003005
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003006 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3007 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3008 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3009 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003010
3011 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003012 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3013 mc_cmd_mem->size, &mc_cmd_mem->dma,
3014 GFP_KERNEL);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003015 if (mc_cmd_mem->va == NULL) {
3016 status = -ENOMEM;
3017 goto free_mbox;
3018 }
3019 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3020
Ivan Vecera29849612010-12-14 05:43:19 +00003021 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003022 spin_lock_init(&adapter->mcc_lock);
3023 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003024
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003025 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003026 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003027 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003028
3029free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003030 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3031 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003032
3033unmap_pci_bars:
3034 be_unmap_pci_bars(adapter);
3035
3036done:
3037 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003038}
3039
3040static void be_stats_cleanup(struct be_adapter *adapter)
3041{
Sathya Perla3abcded2010-10-03 22:12:27 -07003042 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003043
3044 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003045 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3046 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003047}
3048
3049static int be_stats_init(struct be_adapter *adapter)
3050{
Sathya Perla3abcded2010-10-03 22:12:27 -07003051 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003052
Selvin Xavier005d5692011-05-16 07:36:35 +00003053 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003054 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003055 } else {
3056 if (lancer_chip(adapter))
3057 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3058 else
3059 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3060 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003061 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3062 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003063 if (cmd->va == NULL)
3064 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003065 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003066 return 0;
3067}
3068
3069static void __devexit be_remove(struct pci_dev *pdev)
3070{
3071 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003072
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003073 if (!adapter)
3074 return;
3075
Somnath Koturf203af72010-10-25 23:01:03 +00003076 cancel_delayed_work_sync(&adapter->work);
3077
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003078 unregister_netdev(adapter->netdev);
3079
Sathya Perla5fb379e2009-06-18 00:02:59 +00003080 be_clear(adapter);
3081
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003082 be_stats_cleanup(adapter);
3083
3084 be_ctrl_cleanup(adapter);
3085
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003086 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003087 be_sriov_disable(adapter);
3088
Sathya Perla8d56ff12009-11-22 22:02:26 +00003089 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003090
3091 pci_set_drvdata(pdev, NULL);
3092 pci_release_regions(pdev);
3093 pci_disable_device(pdev);
3094
3095 free_netdev(adapter->netdev);
3096}
3097
Sathya Perla2243e2e2009-11-22 22:02:03 +00003098static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003099{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003100 int status;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003101 u8 mac[ETH_ALEN];
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003102
Sathya Perla8788fdc2009-07-27 22:52:03 +00003103 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003104 if (status)
3105 return status;
3106
Sathya Perla3abcded2010-10-03 22:12:27 -07003107 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3108 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003109 if (status)
3110 return status;
3111
3112 memset(mac, 0, ETH_ALEN);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003113
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003114	/* A default permanent address is given to each VF for Lancer */
3115 if (be_physfn(adapter) || lancer_chip(adapter)) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003116 status = be_cmd_mac_addr_query(adapter, mac,
Sathya Perla2243e2e2009-11-22 22:02:03 +00003117 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003118
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003119 if (status)
3120 return status;
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003121
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003122 if (!is_valid_ether_addr(mac))
3123 return -EADDRNOTAVAIL;
3124
3125 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3126 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3127 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003128
Ajit Khaparde3486be22010-07-23 02:04:54 +00003129 if (adapter->function_mode & 0x400)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003130 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3131 else
3132 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3133
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003134 status = be_cmd_get_cntl_attributes(adapter);
3135 if (status)
3136 return status;
3137
Sathya Perla3c8def92011-06-12 20:01:58 +00003138 if ((num_vfs && adapter->sriov_enabled) ||
3139 (adapter->function_mode & 0x400) ||
3140 lancer_chip(adapter) || !be_physfn(adapter)) {
3141 adapter->num_tx_qs = 1;
3142 netif_set_real_num_tx_queues(adapter->netdev,
3143 adapter->num_tx_qs);
3144 } else {
3145 adapter->num_tx_qs = MAX_TX_QS;
3146 }
3147
Sathya Perla2243e2e2009-11-22 22:02:03 +00003148 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003149}
3150
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003151static int be_dev_family_check(struct be_adapter *adapter)
3152{
3153 struct pci_dev *pdev = adapter->pdev;
3154 u32 sli_intf = 0, if_type;
3155
3156 switch (pdev->device) {
3157 case BE_DEVICE_ID1:
3158 case OC_DEVICE_ID1:
3159 adapter->generation = BE_GEN2;
3160 break;
3161 case BE_DEVICE_ID2:
3162 case OC_DEVICE_ID2:
3163 adapter->generation = BE_GEN3;
3164 break;
3165 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003166 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003167 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3168 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3169 SLI_INTF_IF_TYPE_SHIFT;
3170
3171 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3172 if_type != 0x02) {
3173 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3174 return -EINVAL;
3175 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003176 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3177 SLI_INTF_FAMILY_SHIFT);
3178 adapter->generation = BE_GEN3;
3179 break;
3180 default:
3181 adapter->generation = 0;
3182 }
3183 return 0;
3184}
3185
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003186static int lancer_wait_ready(struct be_adapter *adapter)
3187{
3188#define SLIPORT_READY_TIMEOUT 500
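	/* 500 iterations x 20 ms sleep below = ~10 seconds of polling */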
3189 u32 sliport_status;
3190 int status = 0, i;
3191
3192 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3193 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3194 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3195 break;
3196
3197 msleep(20);
3198 }
3199
3200 if (i == SLIPORT_READY_TIMEOUT)
3201 status = -1;
3202
3203 return status;
3204}
3205
3206static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3207{
3208 int status;
3209 u32 sliport_status, err, reset_needed;
3210 status = lancer_wait_ready(adapter);
3211 if (!status) {
3212 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3213 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3214 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3215 if (err && reset_needed) {
3216 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3217 adapter->db + SLIPORT_CONTROL_OFFSET);
3218
3219 /* check adapter has corrected the error */
3220 status = lancer_wait_ready(adapter);
3221 sliport_status = ioread32(adapter->db +
3222 SLIPORT_STATUS_OFFSET);
3223 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3224 SLIPORT_STATUS_RN_MASK);
3225 if (status || sliport_status)
3226 status = -1;
3227 } else if (err || reset_needed) {
3228 status = -1;
3229 }
3230 }
3231 return status;
3232}
3233
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003234static int __devinit be_probe(struct pci_dev *pdev,
3235 const struct pci_device_id *pdev_id)
3236{
3237 int status = 0;
3238 struct be_adapter *adapter;
3239 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003240
3241 status = pci_enable_device(pdev);
3242 if (status)
3243 goto do_none;
3244
3245 status = pci_request_regions(pdev, DRV_NAME);
3246 if (status)
3247 goto disable_dev;
3248 pci_set_master(pdev);
3249
Sathya Perla3c8def92011-06-12 20:01:58 +00003250 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003251 if (netdev == NULL) {
3252 status = -ENOMEM;
3253 goto rel_reg;
3254 }
3255 adapter = netdev_priv(netdev);
3256 adapter->pdev = pdev;
3257 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003258
3259 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003260 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003261 goto free_netdev;
3262
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003263 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003264 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003265
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003266 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003267 if (!status) {
3268 netdev->features |= NETIF_F_HIGHDMA;
3269 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003270 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003271 if (status) {
3272 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3273 goto free_netdev;
3274 }
3275 }
3276
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003277 be_sriov_enable(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003278 if (adapter->sriov_enabled) {
3279 adapter->vf_cfg = kcalloc(num_vfs,
3280 sizeof(struct be_vf_cfg), GFP_KERNEL);
3281
3282 if (!adapter->vf_cfg)
3283 goto free_netdev;
3284 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003285
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003286 status = be_ctrl_init(adapter);
3287 if (status)
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003288 goto free_vf_cfg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003289
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003290 if (lancer_chip(adapter)) {
3291 status = lancer_test_and_set_rdy_state(adapter);
3292 if (status) {
3293			dev_err(&pdev->dev, "Adapter is in a non-recoverable error state\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003294 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003295 }
3296 }
3297
Sathya Perla2243e2e2009-11-22 22:02:03 +00003298 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003299 if (be_physfn(adapter)) {
3300 status = be_cmd_POST(adapter);
3301 if (status)
3302 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003303 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003304
3305 /* tell fw we're ready to fire cmds */
3306 status = be_cmd_fw_init(adapter);
3307 if (status)
3308 goto ctrl_clean;
3309
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003310 status = be_cmd_reset_function(adapter);
3311 if (status)
3312 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003313
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003314 status = be_stats_init(adapter);
3315 if (status)
3316 goto ctrl_clean;
3317
Sathya Perla2243e2e2009-11-22 22:02:03 +00003318 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003319 if (status)
3320 goto stats_clean;
3321
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003322 /* The INTR bit may be set in the card when probed by a kdump kernel
3323 * after a crash.
3324 */
3325 if (!lancer_chip(adapter))
3326 be_intr_set(adapter, false);
3327
Sathya Perla3abcded2010-10-03 22:12:27 -07003328 be_msix_enable(adapter);
3329
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003330 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003331
Sathya Perla5fb379e2009-06-18 00:02:59 +00003332 status = be_setup(adapter);
3333 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003334 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003335
Sathya Perla3abcded2010-10-03 22:12:27 -07003336 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003337 status = register_netdev(netdev);
3338 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003339 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003340
Ajit Khapardee6319362011-02-11 13:35:41 +00003341 if (be_physfn(adapter) && adapter->sriov_enabled) {
Ajit Khaparded0381c42011-04-19 12:11:55 +00003342 u8 mac_speed;
Ajit Khaparded0381c42011-04-19 12:11:55 +00003343 u16 vf, lnk_speed;
3344
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003345 if (!lancer_chip(adapter)) {
3346 status = be_vf_eth_addr_config(adapter);
3347 if (status)
3348 goto unreg_netdev;
3349 }
Ajit Khaparded0381c42011-04-19 12:11:55 +00003350
3351 for (vf = 0; vf < num_vfs; vf++) {
Sathya Perlaea172a02011-08-02 19:57:42 +00003352 status = be_cmd_link_status_query(adapter, &mac_speed,
3353 &lnk_speed, vf + 1);
Ajit Khaparded0381c42011-04-19 12:11:55 +00003354 if (!status)
3355 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3356 else
3357 goto unreg_netdev;
3358 }
Ajit Khapardee6319362011-02-11 13:35:41 +00003359 }
3360
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003361 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003362
Somnath Koturf203af72010-10-25 23:01:03 +00003363 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003364 return 0;
3365
Ajit Khapardee6319362011-02-11 13:35:41 +00003366unreg_netdev:
3367 unregister_netdev(netdev);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003368unsetup:
3369 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003370msix_disable:
3371 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003372stats_clean:
3373 be_stats_cleanup(adapter);
3374ctrl_clean:
3375 be_ctrl_cleanup(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003376free_vf_cfg:
3377 kfree(adapter->vf_cfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003378free_netdev:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003379 be_sriov_disable(adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003380 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003381 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003382rel_reg:
3383 pci_release_regions(pdev);
3384disable_dev:
3385 pci_disable_device(pdev);
3386do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003387 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003388 return status;
3389}
3390
3391static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3392{
3393 struct be_adapter *adapter = pci_get_drvdata(pdev);
3394 struct net_device *netdev = adapter->netdev;
3395
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003396 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003397 if (adapter->wol)
3398 be_setup_wol(adapter, true);
3399
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003400 netif_device_detach(netdev);
3401 if (netif_running(netdev)) {
3402 rtnl_lock();
3403 be_close(netdev);
3404 rtnl_unlock();
3405 }
Ajit Khaparde9e90c962009-11-06 02:06:59 +00003406 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003407 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003408
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003409 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003410 pci_save_state(pdev);
3411 pci_disable_device(pdev);
3412 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3413 return 0;
3414}
3415
3416static int be_resume(struct pci_dev *pdev)
3417{
3418 int status = 0;
3419 struct be_adapter *adapter = pci_get_drvdata(pdev);
3420 struct net_device *netdev = adapter->netdev;
3421
3422 netif_device_detach(netdev);
3423
3424 status = pci_enable_device(pdev);
3425 if (status)
3426 return status;
3427
3428 pci_set_power_state(pdev, 0);
3429 pci_restore_state(pdev);
3430
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003431 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003432 /* tell fw we're ready to fire cmds */
3433 status = be_cmd_fw_init(adapter);
3434 if (status)
3435 return status;
3436
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003437 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003438 if (netif_running(netdev)) {
3439 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003440 be_open(netdev);
3441 rtnl_unlock();
3442 }
3443 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003444
3445 if (adapter->wol)
3446 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003447
3448 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003449 return 0;
3450}
3451
Sathya Perla82456b02010-02-17 01:35:37 +00003452/*
3453 * An FLR (function-level reset) will stop BE from DMAing any data.
3454 */
3455static void be_shutdown(struct pci_dev *pdev)
3456{
3457 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003458
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003459 if (!adapter)
3460 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003461
Sathya Perla0f4a6822011-03-21 20:49:28 +00003462 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003463
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003464 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003465
Sathya Perla82456b02010-02-17 01:35:37 +00003466 if (adapter->wol)
3467 be_setup_wol(adapter, true);
3468
Ajit Khaparde57841862011-04-06 18:08:43 +00003469 be_cmd_reset_function(adapter);
3470
Sathya Perla82456b02010-02-17 01:35:37 +00003471 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003472}
3473
Sathya Perlacf588472010-02-14 21:22:01 +00003474static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3475 pci_channel_state_t state)
3476{
3477 struct be_adapter *adapter = pci_get_drvdata(pdev);
3478 struct net_device *netdev = adapter->netdev;
3479
3480 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3481
3482 adapter->eeh_err = true;
3483
3484 netif_device_detach(netdev);
3485
3486 if (netif_running(netdev)) {
3487 rtnl_lock();
3488 be_close(netdev);
3489 rtnl_unlock();
3490 }
3491 be_clear(adapter);
3492
3493 if (state == pci_channel_io_perm_failure)
3494 return PCI_ERS_RESULT_DISCONNECT;
3495
3496 pci_disable_device(pdev);
3497
3498 return PCI_ERS_RESULT_NEED_RESET;
3499}
3500
3501static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3502{
3503 struct be_adapter *adapter = pci_get_drvdata(pdev);
3504 int status;
3505
3506 dev_info(&adapter->pdev->dev, "EEH reset\n");
3507 adapter->eeh_err = false;
3508
3509 status = pci_enable_device(pdev);
3510 if (status)
3511 return PCI_ERS_RESULT_DISCONNECT;
3512
3513 pci_set_master(pdev);
3514 pci_set_power_state(pdev, 0);
3515 pci_restore_state(pdev);
3516
3517 /* Check if card is ok and fw is ready */
3518 status = be_cmd_POST(adapter);
3519 if (status)
3520 return PCI_ERS_RESULT_DISCONNECT;
3521
3522 return PCI_ERS_RESULT_RECOVERED;
3523}
3524
3525static void be_eeh_resume(struct pci_dev *pdev)
3526{
3527 int status = 0;
3528 struct be_adapter *adapter = pci_get_drvdata(pdev);
3529 struct net_device *netdev = adapter->netdev;
3530
3531 dev_info(&adapter->pdev->dev, "EEH resume\n");
3532
3533 pci_save_state(pdev);
3534
3535 /* tell fw we're ready to fire cmds */
3536 status = be_cmd_fw_init(adapter);
3537 if (status)
3538 goto err;
3539
3540 status = be_setup(adapter);
3541 if (status)
3542 goto err;
3543
3544 if (netif_running(netdev)) {
3545 status = be_open(netdev);
3546 if (status)
3547 goto err;
3548 }
3549 netif_device_attach(netdev);
3550 return;
3551err:
3552 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003553}
3554
3555static struct pci_error_handlers be_eeh_handlers = {
3556 .error_detected = be_eeh_err_detected,
3557 .slot_reset = be_eeh_reset,
3558 .resume = be_eeh_resume,
3559};
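/*
 * EEH recovery sequence driven by the PCI core: .error_detected tears the
 * device down and asks for a reset, .slot_reset re-enables the function and
 * re-runs POST, and .resume re-initializes FW cmds, calls be_setup() and
 * reattaches the netdev.
 */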
3560
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003561static struct pci_driver be_driver = {
3562 .name = DRV_NAME,
3563 .id_table = be_dev_ids,
3564 .probe = be_probe,
3565 .remove = be_remove,
3566 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003567 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003568 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003569 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003570};
3571
3572static int __init be_init_module(void)
3573{
Joe Perches8e95a202009-12-03 07:58:21 +00003574 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3575 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003576 printk(KERN_WARNING DRV_NAME
3577 " : Module param rx_frag_size must be 2048/4096/8192."
3578 " Using 2048\n");
3579 rx_frag_size = 2048;
3580 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003581
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003582 return pci_register_driver(&be_driver);
3583}
3584module_init(be_init_module);
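/*
 * Example module load (illustrative; the module name comes from DRV_NAME,
 * typically "be2net"):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=4
 */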
3585
3586static void __exit be_exit_module(void)
3587{
3588 pci_unregister_driver(&be_driver);
3589}
3590module_exit(be_exit_module);