/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

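/* Queue memory helpers. Every BE ring (EQ/CQ/RQ/TXQ) is backed by one
 * DMA-coherent buffer of len * entry_size bytes tracked in q->dma_mem;
 * be_queue_free() is the exact inverse of be_queue_alloc(). A typical
 * call for a 256-entry, 16-byte-per-entry CQ would look like
 *	be_queue_alloc(adapter, cq, 256, 16);
 * (values illustrative only; the real sizes come from the callers).
 */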
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

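/* Enable/disable host interrupt delivery by flipping the HOSTINTR bit
 * of the membar control register, reached through PCI config space.
 * The read-modify-write is skipped when the bit already matches the
 * requested state, and entirely when an EEH error has been detected.
 */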
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

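/* Doorbell helpers. Each notify composes a 32-bit doorbell word from
 * the ring id (low bits) and a posted/popped count, then writes it to
 * the ring's offset within the doorbell BAR. The wmb() orders the
 * descriptor writes before the MMIO write, so the adapter never sees
 * a doorbell ahead of the entries it announces. For example, posting
 * 64 RX frags to RQ 5 would write (5 | 64 << DB_RQ_NUM_POSTED_SHIFT);
 * the exact field widths are set by the DB_* constants in be_hw.h.
 */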
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

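/* EQ/CQ doorbells carry control bits besides the ring id: "arm"
 * re-enables the ring's interrupt, "clear_int" (EQ only) clears a
 * pending interrupt, and num_popped returns consumed entries to the
 * hardware. Polling paths can call these with arm=false to keep
 * interrupts masked while they drain the queue.
 */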
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

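/* ndo_set_mac_address handler. On a PF the currently programmed pmac
 * is deleted and the new one added via FW cmds; on a VF only
 * netdev->dev_addr is updated, since the parent PF owns MAC
 * programming on behalf of its VFs.
 */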
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

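/* HW stats parsing. The FW reports stats in a generation-specific
 * layout (v0 for BE2, v1 for BE3, pport stats for Lancer); the
 * populate_* helpers below copy the fields of interest into the common
 * adapter->drv_stats so the rest of the driver stays layout-agnostic.
 */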
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

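/* Fold a free-running 16-bit HW counter into a 32-bit SW accumulator.
 * A wrap is inferred whenever the new reading is smaller than the low
 * half of the accumulator. Worked example (readings assumed): with
 * *acc = 0x0003FFF0 and val = 0x0005, wrapped is true and the result
 * is 0x00030005 + 65536 = 0x00040005. More than one full wrap between
 * samples would still go undetected.
 */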
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

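/* ndo_get_stats64 handler: sums the per-RX/TX-queue SW counters (read
 * under the u64_stats seqcount so 64-bit values are torn-read safe on
 * 32-bit hosts) and maps the FW-derived drv_stats error counters onto
 * the standard rtnl_link_stats64 fields.
 */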
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

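/* Per-TX-queue SW counters, bumped once per transmitted skb. For TSO
 * skbs gso_segs is used as the packet count, since that is how many
 * segments reach the wire; non-GSO skbs count as one packet.
 */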
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

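/* Fill the header WRB that precedes an skb's fragment WRBs. The bit
 * fields (crc, lso/lso6, tcpcs/udpcs, vlan tag, total WRB count and
 * byte length) are packed with AMAP_SET_BITS per the amap_eth_hdr_wrb
 * layout. If the skb's vlan priority is not in the priority bmap
 * negotiated for this function, it is rewritten to the FW-recommended
 * priority before the tag is programmed.
 */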
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

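/* Map an skb into the TX ring: one WRB for the linear head (if any),
 * one per page fragment, plus an optional dummy WRB so that BE2/3 see
 * an even WRB count. On a DMA mapping failure the queue head is
 * rewound to map_head and every WRB mapped so far is unmapped, leaving
 * the ring exactly as it was found; returning 0 tells the caller to
 * drop the skb.
 */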
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

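/* ndo_start_xmit handler. Note the stop-before-doorbell ordering: when
 * fewer than BE_MAX_TX_FRAG_COUNT entries would remain free, the
 * subqueue is stopped *before* be_txq_notify() rings the doorbell, so
 * the TX completion of this very skb is guaranteed to run afterwards
 * and can safely wake the queue again.
 */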
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

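/* Adaptive interrupt coalescing (AIC) for RX. Once per second the RX
 * packet rate is sampled and mapped to an EQ delay of
 * (pps / 110000) << 3, clamped to the ring's [min_eqd, max_eqd] range
 * and snapped to 0 below 10 so a lightly loaded link keeps its low
 * latency; the new value is pushed to FW only when it changes.
 */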
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

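/* HW checksum verdict for an RX completion: trust l4_csum only for TCP
 * or UDP frames, and additionally require a good IP checksum unless
 * the frame is IPv6, which has no header checksum to validate.
 */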
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
1022static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001023 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001024 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001025{
Sathya Perla3abcded2010-10-03 22:12:27 -07001026 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001027 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001028 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001029
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001030 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001031 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001032 put_page(page_info->page);
1033 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001034 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001035 }
1036}
1037
1038/*
1039 * skb_fill_rx_data forms a complete skb for an ether frame
1040 * indicated by rxcp.
1041 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001042static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001043 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001044{
Sathya Perla3abcded2010-10-03 22:12:27 -07001045 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001046 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001047 u16 i, j;
1048 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001049 u8 *start;
1050
Sathya Perla2e588f82011-03-11 02:49:26 +00001051 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 start = page_address(page_info->page) + page_info->page_offset;
1053 prefetch(start);
1054
1055 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001056 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001057
1058 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001059 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001060 memcpy(skb->data, start, hdr_len);
1061 skb->len = curr_frag_len;
1062 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1063 /* Complete packet has now been moved to data */
1064 put_page(page_info->page);
1065 skb->data_len = 0;
1066 skb->tail += curr_frag_len;
1067 } else {
1068 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001069 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001070 skb_shinfo(skb)->frags[0].page_offset =
1071 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001072 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001073 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001074 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001075 skb->tail += hdr_len;
1076 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001077 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078
Sathya Perla2e588f82011-03-11 02:49:26 +00001079 if (rxcp->pkt_size <= rx_frag_size) {
1080 BUG_ON(rxcp->num_rcvd != 1);
1081 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 }
1083
1084 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001085 index_inc(&rxcp->rxq_idx, rxq->len);
1086 remaining = rxcp->pkt_size - curr_frag_len;
1087 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1088 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1089 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001090
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001091 /* Coalesce all frags from the same physical page in one slot */
1092 if (page_info->page_offset == 0) {
1093 /* Fresh page */
1094 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001095 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001096 skb_shinfo(skb)->frags[j].page_offset =
1097 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001098 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001099 skb_shinfo(skb)->nr_frags++;
1100 } else {
1101 put_page(page_info->page);
1102 }
1103
Eric Dumazet9e903e02011-10-18 21:00:24 +00001104 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001105 skb->len += curr_frag_len;
1106 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001107 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001108 remaining -= curr_frag_len;
1109 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001110 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001111 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001112 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001113}
1114
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001115/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001117 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001118 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001119{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001120 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001121 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001122
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001123 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001124 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001125 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001126 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001127 return;
1128 }
1129
Sathya Perla2e588f82011-03-11 02:49:26 +00001130 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001131
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001132 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001133 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001134 else
1135 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001136
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001137 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001138 if (adapter->netdev->features & NETIF_F_RXHASH)
1139 skb->rxhash = rxcp->rss_hash;
1140
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001141
Jiri Pirko343e43c2011-08-25 02:50:51 +00001142 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001143 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1144
1145 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146}
1147
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001148/* Process the RX completion indicated by rxcp when GRO is enabled */
1149static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001150 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001151 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152{
1153 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001154 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001155 struct be_queue_info *rxq = &rxo->q;
1156 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 u16 remaining, curr_frag_len;
1158 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001159
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001160 skb = napi_get_frags(&eq_obj->napi);
1161 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001162 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001163 return;
1164 }
1165
Sathya Perla2e588f82011-03-11 02:49:26 +00001166 remaining = rxcp->pkt_size;
1167 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1168 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169
1170 curr_frag_len = min(remaining, rx_frag_size);
1171
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001172 /* Coalesce all frags from the same physical page in one slot */
1173 if (i == 0 || page_info->page_offset == 0) {
1174 /* First frag or Fresh page */
1175 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001176 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001177 skb_shinfo(skb)->frags[j].page_offset =
1178 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001179 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001180 } else {
1181 put_page(page_info->page);
1182 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001183 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001184 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001185 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001186 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001187 memset(page_info, 0, sizeof(*page_info));
1188 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001189 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001191 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001192 skb->len = rxcp->pkt_size;
1193 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001194 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001195 if (adapter->netdev->features & NETIF_F_RXHASH)
1196 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001197
Jiri Pirko343e43c2011-08-25 02:50:51 +00001198 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001199 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1200
1201 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202}
1203
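/* The two parse routines below lift each field out of the DMA'ed
 * completion with AMAP_GET_BITS(), which extracts a bit-field from a
 * little-endian adapter structure using the field's offset/width in
 * the amap_eth_rx_compl_* template. Conceptually (a sketch, not the
 * real macro):
 *
 *	pkt_size = extract_bits(compl, bit_offset(pktsize),
 *				bit_width(pktsize));
 *
 * The v1 layout is used once BE3 native mode has been negotiated
 * (adapter->be3_native); otherwise the v0 layout applies.
 */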
Sathya Perla2e588f82011-03-11 02:49:26 +00001204static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1205 struct be_eth_rx_compl *compl,
1206 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001207{
Sathya Perla2e588f82011-03-11 02:49:26 +00001208 rxcp->pkt_size =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1210 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1211 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1212 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001213 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001214 rxcp->ip_csum =
1215 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1216 rxcp->l4_csum =
1217 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1218 rxcp->ipv6 =
1219 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1220 rxcp->rxq_idx =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1222 rxcp->num_rcvd =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1224 rxcp->pkt_type =
1225 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001226 rxcp->rss_hash =
	1227		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001228 if (rxcp->vlanf) {
1229 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001230 compl);
1231 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1232 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001233 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001234 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001235}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001236
Sathya Perla2e588f82011-03-11 02:49:26 +00001237static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1238 struct be_eth_rx_compl *compl,
1239 struct be_rx_compl_info *rxcp)
1240{
1241 rxcp->pkt_size =
1242 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1243 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1244 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1245 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001246 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001247 rxcp->ip_csum =
1248 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1249 rxcp->l4_csum =
1250 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1251 rxcp->ipv6 =
1252 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1253 rxcp->rxq_idx =
1254 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1255 rxcp->num_rcvd =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1257 rxcp->pkt_type =
1258 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001259 rxcp->rss_hash =
	1260		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001261 if (rxcp->vlanf) {
1262 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001263 compl);
1264 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1265 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001266 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001267 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001268}
1269
1270static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1271{
1272 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1273 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1274 struct be_adapter *adapter = rxo->adapter;
1275
	1276	/* For checking the valid bit it is OK to use either definition as the
1277 * valid bit is at the same position in both v0 and v1 Rx compl */
1278 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001279 return NULL;
1280
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001281 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001282 be_dws_le_to_cpu(compl, sizeof(*compl));
1283
1284 if (adapter->be3_native)
1285 be_parse_rx_compl_v1(adapter, compl, rxcp);
1286 else
1287 be_parse_rx_compl_v0(adapter, compl, rxcp);
1288
Sathya Perla15d72182011-03-21 20:49:26 +00001289 if (rxcp->vlanf) {
	1290		/* vlanf could be wrongly set in some cards;
	1291		 * ignore it if vtm is not set */
1292 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1293 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001294
Sathya Perla15d72182011-03-21 20:49:26 +00001295 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001296 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001297
Somnath Kotur939cf302011-08-18 21:51:49 -07001298 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001299 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001300 rxcp->vlanf = 0;
1301 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001302
	1303	/* As the compl has been parsed, reset it; we won't touch it again */
1304 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305
Sathya Perla3abcded2010-10-03 22:12:27 -07001306 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001307 return rxcp;
1308}
1309
Eric Dumazet1829b082011-03-01 05:48:12 +00001310static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001311{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001312 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001313
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001314 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001315 gfp |= __GFP_COMP;
1316 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001317}
1318
1319/*
1320 * Allocate a page, split it to fragments of size rx_frag_size and post as
1321 * receive buffers to BE
1322 */
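/*
 * A worked example, assuming a 4 KiB PAGE_SIZE: with rx_frag_size ==
 * 2048, get_order(2048) == 0, so big_page_size == (1 << 0) * PAGE_SIZE
 * == 4096 and each page yields two frags; with rx_frag_size == 8192,
 * big_page_size == 8192 and one compound page backs a single frag.
 * The page is dma-mapped once and every additional frag carved from it
 * takes a reference via get_page().
 */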
Eric Dumazet1829b082011-03-01 05:48:12 +00001323static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324{
Sathya Perla3abcded2010-10-03 22:12:27 -07001325 struct be_adapter *adapter = rxo->adapter;
1326 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001327 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001328 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001329 struct page *pagep = NULL;
1330 struct be_eth_rx_d *rxd;
1331 u64 page_dmaaddr = 0, frag_dmaaddr;
1332 u32 posted, page_offset = 0;
1333
Sathya Perla3abcded2010-10-03 22:12:27 -07001334 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001335 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1336 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001337 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001338 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001339 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001340 break;
1341 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001342 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1343 0, adapter->big_page_size,
1344 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001345 page_info->page_offset = 0;
1346 } else {
1347 get_page(pagep);
1348 page_info->page_offset = page_offset + rx_frag_size;
1349 }
1350 page_offset = page_info->page_offset;
1351 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001352 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1354
1355 rxd = queue_head_node(rxq);
1356 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1357 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358
1359 /* Any space left in the current big page for another frag? */
1360 if ((page_offset + rx_frag_size + rx_frag_size) >
1361 adapter->big_page_size) {
1362 pagep = NULL;
1363 page_info->last_page_user = true;
1364 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001365
1366 prev_page_info = page_info;
1367 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001368 page_info = &page_info_tbl[rxq->head];
1369 }
1370 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001371 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001372
1373 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001374 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001375 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001376 } else if (atomic_read(&rxq->used) == 0) {
1377 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001378 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380}
1381
Sathya Perla5fb379e2009-06-18 00:02:59 +00001382static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001383{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1385
1386 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1387 return NULL;
1388
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001389 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1391
1392 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1393
1394 queue_tail_inc(tx_cq);
1395 return txcp;
1396}
1397
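/* Reclaim the wrbs of one completed TX skb: skip the header wrb, then
 * unmap each fragment wrb up to and including last_index, free the skb
 * and return the number of wrbs consumed (header + fragments) so the
 * caller can subtract it from txq->used.
 */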
Sathya Perla3c8def92011-06-12 20:01:58 +00001398static u16 be_tx_compl_process(struct be_adapter *adapter,
1399 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400{
Sathya Perla3c8def92011-06-12 20:01:58 +00001401 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001402 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001403 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001404 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001405 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1406 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001408 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001410 sent_skbs[txq->tail] = NULL;
1411
1412 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001413 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001414
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001415 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001416 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001417 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001418 unmap_tx_frag(&adapter->pdev->dev, wrb,
1419 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001420 unmap_skb_hdr = false;
1421
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422 num_wrbs++;
1423 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001424 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001426 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001427 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001428}
1429
Sathya Perla859b1e42009-08-10 03:43:51 +00001430static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1431{
1432 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1433
1434 if (!eqe->evt)
1435 return NULL;
1436
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001437 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001438 eqe->evt = le32_to_cpu(eqe->evt);
1439 queue_tail_inc(&eq_obj->q);
1440 return eqe;
1441}
1442
1443static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001444 struct be_eq_obj *eq_obj,
1445 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001446{
1447 struct be_eq_entry *eqe;
1448 u16 num = 0;
1449
1450 while ((eqe = event_get(eq_obj)) != NULL) {
1451 eqe->evt = 0;
1452 num++;
1453 }
1454
1455 /* Deal with any spurious interrupts that come
1456 * without events
1457 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001458 if (!num)
1459 rearm = true;
1460
1461 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001462 if (num)
1463 napi_schedule(&eq_obj->napi);
1464
1465 return num;
1466}
1467
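/* Arming policy: RX callers invoke event_handle() with rearm == true,
 * so their EQs are re-armed right away; the shared TX/MCC EQ is called
 * with rearm == false and re-armed only at the end of be_poll_tx_mcc().
 * A spurious interrupt with no events always re-arms so the
 * notification is not lost.
 */
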
1468/* Just read and notify events without processing them.
1469 * Used at the time of destroying event queues */
1470static void be_eq_clean(struct be_adapter *adapter,
1471 struct be_eq_obj *eq_obj)
1472{
1473 struct be_eq_entry *eqe;
1474 u16 num = 0;
1475
1476 while ((eqe = event_get(eq_obj)) != NULL) {
1477 eqe->evt = 0;
1478 num++;
1479 }
1480
1481 if (num)
1482 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1483}
1484
Sathya Perla3abcded2010-10-03 22:12:27 -07001485static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486{
1487 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001488 struct be_queue_info *rxq = &rxo->q;
1489 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001490 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001491 u16 tail;
1492
1493 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001494 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1495 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001496 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497 }
1498
	1499	/* Then free posted rx buffers that were not used */
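	/* e.g. with len == 1024, head == 10 and 256 buffers still posted,
	 * tail == (10 + 1024 - 256) % 1024 == 778, the oldest outstanding
	 * slot; walk forward from there releasing each page.
	 */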
1500 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001501 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001502 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001503 put_page(page_info->page);
1504 memset(page_info, 0, sizeof(*page_info));
1505 }
1506 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001507 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001508}
1509
Sathya Perla3c8def92011-06-12 20:01:58 +00001510static void be_tx_compl_clean(struct be_adapter *adapter,
1511 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512{
Sathya Perla3c8def92011-06-12 20:01:58 +00001513 struct be_queue_info *tx_cq = &txo->cq;
1514 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001515 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001516 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001517 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001518 struct sk_buff *sent_skb;
1519 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520
Sathya Perlaa8e91792009-08-10 03:42:43 +00001521 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1522 do {
1523 while ((txcp = be_tx_compl_get(tx_cq))) {
1524 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1525 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001526 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001527 cmpl++;
1528 }
1529 if (cmpl) {
1530 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001531 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001532 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001533 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001534 }
1535
1536 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1537 break;
1538
1539 mdelay(1);
1540 } while (true);
1541
1542 if (atomic_read(&txq->used))
1543 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1544 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001545
1546 /* free posted tx for which compls will never arrive */
1547 while (atomic_read(&txq->used)) {
1548 sent_skb = sent_skbs[txq->tail];
1549 end_idx = txq->tail;
1550 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001551 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1552 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001553 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001554 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001555 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556}
1557
Sathya Perla5fb379e2009-06-18 00:02:59 +00001558static void be_mcc_queues_destroy(struct be_adapter *adapter)
1559{
1560 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001561
Sathya Perla8788fdc2009-07-27 22:52:03 +00001562 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001563 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001564 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001565 be_queue_free(adapter, q);
1566
Sathya Perla8788fdc2009-07-27 22:52:03 +00001567 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001568 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001569 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001570 be_queue_free(adapter, q);
1571}
1572
1573/* Must be called only after TX qs are created as MCC shares TX EQ */
1574static int be_mcc_queues_create(struct be_adapter *adapter)
1575{
1576 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001577
1578 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001579 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001580 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001581 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001582 goto err;
1583
1584 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001585 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001586 goto mcc_cq_free;
1587
1588 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001589 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001590 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1591 goto mcc_cq_destroy;
1592
1593 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001594 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001595 goto mcc_q_free;
1596
1597 return 0;
1598
1599mcc_q_free:
1600 be_queue_free(adapter, q);
1601mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001602 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001603mcc_cq_free:
1604 be_queue_free(adapter, cq);
1605err:
1606 return -1;
1607}
1608
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001609static void be_tx_queues_destroy(struct be_adapter *adapter)
1610{
1611 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001612 struct be_tx_obj *txo;
1613 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614
Sathya Perla3c8def92011-06-12 20:01:58 +00001615 for_all_tx_queues(adapter, txo, i) {
1616 q = &txo->q;
1617 if (q->created)
1618 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1619 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001620
Sathya Perla3c8def92011-06-12 20:01:58 +00001621 q = &txo->cq;
1622 if (q->created)
1623 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1624 be_queue_free(adapter, q);
1625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001626
Sathya Perla859b1e42009-08-10 03:43:51 +00001627 /* Clear any residual events */
1628 be_eq_clean(adapter, &adapter->tx_eq);
1629
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630 q = &adapter->tx_eq.q;
1631 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001632 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001633 be_queue_free(adapter, q);
1634}
1635
Sathya Perla3c8def92011-06-12 20:01:58 +00001636/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001637static int be_tx_queues_create(struct be_adapter *adapter)
1638{
1639 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001640 struct be_tx_obj *txo;
1641 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001642
1643 adapter->tx_eq.max_eqd = 0;
1644 adapter->tx_eq.min_eqd = 0;
1645 adapter->tx_eq.cur_eqd = 96;
1646 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001647
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001648 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001649 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1650 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651 return -1;
1652
Sathya Perla8788fdc2009-07-27 22:52:03 +00001653 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001654 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001655 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001656
Sathya Perla3c8def92011-06-12 20:01:58 +00001657 for_all_tx_queues(adapter, txo, i) {
1658 cq = &txo->cq;
1659 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001660 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001661 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001662
Sathya Perla3c8def92011-06-12 20:01:58 +00001663 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1664 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665
Sathya Perla3c8def92011-06-12 20:01:58 +00001666 q = &txo->q;
1667 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1668 sizeof(struct be_eth_wrb)))
1669 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001670
Sathya Perla3c8def92011-06-12 20:01:58 +00001671 if (be_cmd_txq_create(adapter, q, cq))
1672 goto err;
1673 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674 return 0;
1675
Sathya Perla3c8def92011-06-12 20:01:58 +00001676err:
1677 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678 return -1;
1679}
1680
1681static void be_rx_queues_destroy(struct be_adapter *adapter)
1682{
1683 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001684 struct be_rx_obj *rxo;
1685 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686
Sathya Perla3abcded2010-10-03 22:12:27 -07001687 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001688 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001689
Sathya Perla3abcded2010-10-03 22:12:27 -07001690 q = &rxo->cq;
1691 if (q->created)
1692 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1693 be_queue_free(adapter, q);
1694
Sathya Perla3abcded2010-10-03 22:12:27 -07001695 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001696 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001697 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001698 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001700}
1701
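/* Multiple (RSS) RX queues are requested only when the function
 * advertises BE_FUNCTION_CAPS_RSS and neither SR-IOV nor the 0x400
 * function_mode is active; that 0x400 flag (also tested in
 * be_rx_compl_get()) is understood to be the FLEX10/multi-channel bit.
 */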
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001702static u32 be_num_rxqs_want(struct be_adapter *adapter)
1703{
Sathya Perlac814fd32011-06-26 20:41:25 +00001704 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001705 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1706 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1707 } else {
1708 dev_warn(&adapter->pdev->dev,
1709 "No support for multiple RX queues\n");
1710 return 1;
1711 }
1712}
1713
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714static int be_rx_queues_create(struct be_adapter *adapter)
1715{
1716 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001717 struct be_rx_obj *rxo;
1718 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001720 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1721 msix_enabled(adapter) ?
1722 adapter->num_msix_vec - 1 : 1);
1723 if (adapter->num_rx_qs != MAX_RX_QS)
1724 dev_warn(&adapter->pdev->dev,
1725 "Can create only %d RX queues", adapter->num_rx_qs);
1726
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001728 for_all_rx_queues(adapter, rxo, i) {
1729 rxo->adapter = adapter;
1730 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1731 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001732
Sathya Perla3abcded2010-10-03 22:12:27 -07001733 /* EQ */
1734 eq = &rxo->rx_eq.q;
1735 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1736 sizeof(struct be_eq_entry));
1737 if (rc)
1738 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001739
Sathya Perla3abcded2010-10-03 22:12:27 -07001740 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1741 if (rc)
1742 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001744 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001745
Sathya Perla3abcded2010-10-03 22:12:27 -07001746 /* CQ */
1747 cq = &rxo->cq;
1748 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1749 sizeof(struct be_eth_rx_compl));
1750 if (rc)
1751 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752
Sathya Perla3abcded2010-10-03 22:12:27 -07001753 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1754 if (rc)
1755 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001756
1757 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001758 q = &rxo->q;
1759 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1760 sizeof(struct be_eth_rx_d));
1761 if (rc)
1762 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763
Sathya Perla3abcded2010-10-03 22:12:27 -07001764 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001765
1766 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001767err:
1768 be_rx_queues_destroy(adapter);
1769 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001770}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001771
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001772static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001773{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001774 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1775 if (!eqe->evt)
1776 return false;
1777 else
1778 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001779}
1780
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001781static irqreturn_t be_intx(int irq, void *dev)
1782{
1783 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001784 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001785	int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001786
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001787 if (lancer_chip(adapter)) {
1788 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001789 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001790 for_all_rx_queues(adapter, rxo, i) {
1791 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001792 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001793 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001794
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001795 if (!(tx || rx))
1796 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001797
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001798 } else {
1799 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1800 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1801 if (!isr)
1802 return IRQ_NONE;
1803
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001804 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001805 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001806
1807 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001808 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001809 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001810 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001811 }
Sathya Perlac001c212009-07-01 01:06:07 +00001812
Sathya Perla8788fdc2009-07-27 22:52:03 +00001813 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814}
1815
1816static irqreturn_t be_msix_rx(int irq, void *dev)
1817{
Sathya Perla3abcded2010-10-03 22:12:27 -07001818 struct be_rx_obj *rxo = dev;
1819 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820
Sathya Perla3c8def92011-06-12 20:01:58 +00001821 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001822
1823 return IRQ_HANDLED;
1824}
1825
Sathya Perla5fb379e2009-06-18 00:02:59 +00001826static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001827{
1828 struct be_adapter *adapter = dev;
1829
Sathya Perla3c8def92011-06-12 20:01:58 +00001830 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831
1832 return IRQ_HANDLED;
1833}
1834
Sathya Perla2e588f82011-03-11 02:49:26 +00001835static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001836{
Sathya Perla2e588f82011-03-11 02:49:26 +00001837	return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838}
1839
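/* NAPI RX poll: consume at most 'budget' completions. Flush compls
 * (num_rcvd == 0) and partial-DMA compls (pkt_size == 0, Lancer B0)
 * carry no usable data and are dropped. Returning work_done < budget
 * tells NAPI the queue is drained, so the CQ is notified with arm ==
 * true; otherwise interrupts stay off and the poll will run again.
 */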
stephen hemminger49b05222010-10-21 07:50:48 +00001840static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001841{
1842 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001843 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1844 struct be_adapter *adapter = rxo->adapter;
1845 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001846 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847 u32 work_done;
1848
Sathya Perlaac124ff2011-07-25 19:10:14 +00001849 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001850 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001851 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852 if (!rxcp)
1853 break;
1854
Sathya Perla12004ae2011-08-02 19:57:46 +00001855		/* Is it a flush compl that has no data? */
1856 if (unlikely(rxcp->num_rcvd == 0))
1857 goto loop_continue;
1858
	1859		/* Discard compls with partial DMA (Lancer B0) */
1860 if (unlikely(!rxcp->pkt_size)) {
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001861 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001862 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001863 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001864
Sathya Perla12004ae2011-08-02 19:57:46 +00001865 /* On BE drop pkts that arrive due to imperfect filtering in
	1866		 * promiscuous mode on some SKUs
1867 */
1868 if (unlikely(rxcp->port != adapter->port_num &&
1869 !lancer_chip(adapter))) {
1870 be_rx_compl_discard(adapter, rxo, rxcp);
1871 goto loop_continue;
1872 }
1873
1874 if (do_gro(rxcp))
1875 be_rx_compl_process_gro(adapter, rxo, rxcp);
1876 else
1877 be_rx_compl_process(adapter, rxo, rxcp);
1878loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001879 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001880 }
1881
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001882 /* Refill the queue */
Sathya Perla857c9902011-08-22 19:41:51 +00001883 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001884 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001885
1886 /* All consumed */
1887 if (work_done < budget) {
1888 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001889 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001890 } else {
1891 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001892 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001893 }
1894 return work_done;
1895}
1896
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001897/* As TX and MCC share the same EQ check for both TX and MCC completions.
1898 * For TX/MCC we don't honour budget; consume everything
1899 */
1900static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001901{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001902 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1903 struct be_adapter *adapter =
1904 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001905 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001906 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001907 int tx_compl, mcc_compl, status = 0;
1908 u8 i;
1909 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001910
Sathya Perla3c8def92011-06-12 20:01:58 +00001911 for_all_tx_queues(adapter, txo, i) {
1912 tx_compl = 0;
1913 num_wrbs = 0;
1914 while ((txcp = be_tx_compl_get(&txo->cq))) {
1915 num_wrbs += be_tx_compl_process(adapter, txo,
1916 AMAP_GET_BITS(struct amap_eth_tx_compl,
1917 wrb_index, txcp));
1918 tx_compl++;
1919 }
1920 if (tx_compl) {
1921 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1922
1923 atomic_sub(num_wrbs, &txo->q.used);
1924
1925 /* As Tx wrbs have been freed up, wake up netdev queue
1926 * if it was stopped due to lack of tx wrbs. */
1927 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1928 atomic_read(&txo->q.used) < txo->q.len / 2) {
1929 netif_wake_subqueue(adapter->netdev, i);
1930 }
1931
Sathya Perlaab1594e2011-07-25 19:10:15 +00001932 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001933 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001934 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00001935 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936 }
1937
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001938 mcc_compl = be_process_mcc(adapter, &status);
1939
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001940 if (mcc_compl) {
1941 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1942 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1943 }
1944
Sathya Perla3c8def92011-06-12 20:01:58 +00001945 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001946
Sathya Perla3c8def92011-06-12 20:01:58 +00001947 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001948 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001949 return 1;
1950}
1951
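/* Read the unrecoverable-error (UE) status CSRs, drop any bits that
 * are set in the corresponding mask registers (masked bits are to be
 * ignored), and log the decoded block name from the
 * ue_status_{low,hi}_desc tables for every bit left standing.
 */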
Ajit Khaparded053de92010-09-03 06:23:30 +00001952void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001953{
1954 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1955 u32 i;
1956
1957 pci_read_config_dword(adapter->pdev,
1958 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1959 pci_read_config_dword(adapter->pdev,
1960 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1961 pci_read_config_dword(adapter->pdev,
1962 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1963 pci_read_config_dword(adapter->pdev,
1964 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1965
1966 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1967 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1968
Ajit Khaparded053de92010-09-03 06:23:30 +00001969 if (ue_status_lo || ue_status_hi) {
1970 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00001971 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00001972 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1973 }
1974
Ajit Khaparde7c185272010-07-29 06:16:33 +00001975 if (ue_status_lo) {
1976 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1977 if (ue_status_lo & 1)
1978 dev_err(&adapter->pdev->dev,
1979 "UE: %s bit set\n", ue_status_low_desc[i]);
1980 }
1981 }
1982 if (ue_status_hi) {
1983 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1984 if (ue_status_hi & 1)
1985 dev_err(&adapter->pdev->dev,
1986 "UE: %s bit set\n", ue_status_hi_desc[i]);
1987 }
1988 }
1989
1990}
1991
Sathya Perlaea1dae12009-03-19 23:56:20 -07001992static void be_worker(struct work_struct *work)
1993{
1994 struct be_adapter *adapter =
1995 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07001996 struct be_rx_obj *rxo;
1997 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07001998
Sathya Perla16da8252011-03-21 20:49:27 +00001999 if (!adapter->ue_detected && !lancer_chip(adapter))
2000 be_detect_dump_ue(adapter);
2001
Somnath Koturf203af72010-10-25 23:01:03 +00002002 /* when interrupts are not yet enabled, just reap any pending
2003 * mcc completions */
2004 if (!netif_running(adapter->netdev)) {
2005 int mcc_compl, status = 0;
2006
2007 mcc_compl = be_process_mcc(adapter, &status);
2008
2009 if (mcc_compl) {
2010 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2011 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2012 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00002013
Somnath Koturf203af72010-10-25 23:01:03 +00002014 goto reschedule;
2015 }
2016
Selvin Xavier005d5692011-05-16 07:36:35 +00002017 if (!adapter->stats_cmd_sent) {
2018 if (lancer_chip(adapter))
2019 lancer_cmd_get_pport_stats(adapter,
2020 &adapter->stats_cmd);
2021 else
2022 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2023 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002024
Sathya Perla3abcded2010-10-03 22:12:27 -07002025 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002026 be_rx_eqd_update(adapter, rxo);
2027
2028 if (rxo->rx_post_starved) {
2029 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002030 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002031 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002032 }
2033
Somnath Koturf203af72010-10-25 23:01:03 +00002034reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002035 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002036 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2037}
2038
Sathya Perla8d56ff12009-11-22 22:02:26 +00002039static void be_msix_disable(struct be_adapter *adapter)
2040{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002041 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002042 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002043 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002044 }
2045}
2046
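/* Request one MSI-X vector per desired RX queue plus one shared
 * TX/MCC vector. In this kernel's API pci_enable_msix() returns 0 on
 * success or, when the request cannot be met in full, the number of
 * vectors actually available; if that is at least BE_MIN_MSIX_VECTORS
 * the request is retried with the smaller count, otherwise MSI-X is
 * left disabled and be_irq_register() falls back to INTx.
 */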
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002047static void be_msix_enable(struct be_adapter *adapter)
2048{
Sathya Perla3abcded2010-10-03 22:12:27 -07002049#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002050 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002051
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002052 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002053
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002054 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002055 adapter->msix_entries[i].entry = i;
2056
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002057 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002058 if (status == 0) {
2059 goto done;
2060 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002061 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002062 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002063 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002064 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002065 }
2066 return;
2067done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002068 adapter->num_msix_vec = num_vec;
2069 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002070}
2071
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002072static void be_sriov_enable(struct be_adapter *adapter)
2073{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002074 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002075#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002076 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002077 int status, pos;
2078 u16 nvfs;
2079
2080 pos = pci_find_ext_capability(adapter->pdev,
2081 PCI_EXT_CAP_ID_SRIOV);
2082 pci_read_config_word(adapter->pdev,
2083 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2084
2085 if (num_vfs > nvfs) {
2086 dev_info(&adapter->pdev->dev,
2087 "Device supports %d VFs and not %d\n",
2088 nvfs, num_vfs);
2089 num_vfs = nvfs;
2090 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002091
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002092 status = pci_enable_sriov(adapter->pdev, num_vfs);
2093 adapter->sriov_enabled = status ? false : true;
2094 }
2095#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002096}
2097
2098static void be_sriov_disable(struct be_adapter *adapter)
2099{
2100#ifdef CONFIG_PCI_IOV
2101 if (adapter->sriov_enabled) {
2102 pci_disable_sriov(adapter->pdev);
2103 adapter->sriov_enabled = false;
2104 }
2105#endif
2106}
2107
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002108static inline int be_msix_vec_get(struct be_adapter *adapter,
2109 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002110{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002111 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002112}
2113
2114static int be_request_irq(struct be_adapter *adapter,
2115 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002116 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002117{
2118 struct net_device *netdev = adapter->netdev;
2119 int vec;
2120
2121 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002122 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002123 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002124}
2125
Sathya Perla3abcded2010-10-03 22:12:27 -07002126static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2127 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002128{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002129 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002130 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002131}
2132
2133static int be_msix_register(struct be_adapter *adapter)
2134{
Sathya Perla3abcded2010-10-03 22:12:27 -07002135 struct be_rx_obj *rxo;
2136 int status, i;
2137 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002138
Sathya Perla3abcded2010-10-03 22:12:27 -07002139 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2140 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002141 if (status)
2142 goto err;
2143
Sathya Perla3abcded2010-10-03 22:12:27 -07002144 for_all_rx_queues(adapter, rxo, i) {
2145 sprintf(qname, "rxq%d", i);
2146 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2147 qname, rxo);
2148 if (status)
2149 goto err_msix;
2150 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002151
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002152 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002153
Sathya Perla3abcded2010-10-03 22:12:27 -07002154err_msix:
2155 be_free_irq(adapter, &adapter->tx_eq, adapter);
2156
2157 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2158 be_free_irq(adapter, &rxo->rx_eq, rxo);
2159
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002160err:
2161 dev_warn(&adapter->pdev->dev,
2162 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002163 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002164 return status;
2165}
2166
2167static int be_irq_register(struct be_adapter *adapter)
2168{
2169 struct net_device *netdev = adapter->netdev;
2170 int status;
2171
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002172 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002173 status = be_msix_register(adapter);
2174 if (status == 0)
2175 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002176 /* INTx is not supported for VF */
2177 if (!be_physfn(adapter))
2178 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179 }
2180
2181 /* INTx */
2182 netdev->irq = adapter->pdev->irq;
2183 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2184 adapter);
2185 if (status) {
2186 dev_err(&adapter->pdev->dev,
2187 "INTx request IRQ failed - err %d\n", status);
2188 return status;
2189 }
2190done:
2191 adapter->isr_registered = true;
2192 return 0;
2193}
2194
2195static void be_irq_unregister(struct be_adapter *adapter)
2196{
2197 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002198 struct be_rx_obj *rxo;
2199 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002200
2201 if (!adapter->isr_registered)
2202 return;
2203
2204 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002205 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002206 free_irq(netdev->irq, adapter);
2207 goto done;
2208 }
2209
2210 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002211 be_free_irq(adapter, &adapter->tx_eq, adapter);
2212
2213 for_all_rx_queues(adapter, rxo, i)
2214 be_free_irq(adapter, &rxo->rx_eq, rxo);
2215
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216done:
2217 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002218}
2219
Sathya Perla482c9e72011-06-29 23:33:17 +00002220static void be_rx_queues_clear(struct be_adapter *adapter)
2221{
2222 struct be_queue_info *q;
2223 struct be_rx_obj *rxo;
2224 int i;
2225
2226 for_all_rx_queues(adapter, rxo, i) {
2227 q = &rxo->q;
2228 if (q->created) {
2229 be_cmd_rxq_destroy(adapter, q);
2230 /* After the rxq is invalidated, wait for a grace time
2231 * of 1ms for all dma to end and the flush compl to
2232 * arrive
2233 */
2234 mdelay(1);
2235 be_rx_q_clean(adapter, rxo);
2236 }
2237
2238 /* Clear any residual events */
2239 q = &rxo->rx_eq.q;
2240 if (q->created)
2241 be_eq_clean(adapter, &rxo->rx_eq);
2242 }
2243}
2244
Sathya Perla889cd4b2010-05-30 23:33:45 +00002245static int be_close(struct net_device *netdev)
2246{
2247 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002248 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002249 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002250 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002251 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002252
Sathya Perla889cd4b2010-05-30 23:33:45 +00002253 be_async_mcc_disable(adapter);
2254
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002255 if (!lancer_chip(adapter))
2256 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002257
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002258 for_all_rx_queues(adapter, rxo, i)
2259 napi_disable(&rxo->rx_eq.napi);
2260
2261 napi_disable(&tx_eq->napi);
2262
2263 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002264 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2265 for_all_rx_queues(adapter, rxo, i)
2266 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002267 for_all_tx_queues(adapter, txo, i)
2268 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002269 }
2270
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002271 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002272 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002273 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002274
2275 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002276 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002277 synchronize_irq(vec);
2278 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002279 } else {
2280 synchronize_irq(netdev->irq);
2281 }
2282 be_irq_unregister(adapter);
2283
Sathya Perla889cd4b2010-05-30 23:33:45 +00002284 /* Wait for all pending tx completions to arrive so that
2285 * all tx skbs are freed.
2286 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002287 for_all_tx_queues(adapter, txo, i)
2288 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002289
Sathya Perla482c9e72011-06-29 23:33:17 +00002290 be_rx_queues_clear(adapter);
2291 return 0;
2292}
2293
2294static int be_rx_queues_setup(struct be_adapter *adapter)
2295{
2296 struct be_rx_obj *rxo;
2297 int rc, i;
2298 u8 rsstable[MAX_RSS_QS];
2299
2300 for_all_rx_queues(adapter, rxo, i) {
2301 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2302 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2303 adapter->if_handle,
			2304			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2305 if (rc)
2306 return rc;
2307 }
2308
2309 if (be_multi_rxq(adapter)) {
2310 for_all_rss_queues(adapter, rxo, i)
2311 rsstable[i] = rxo->rss_id;
2312
2313 rc = be_cmd_rss_config(adapter, rsstable,
2314 adapter->num_rx_qs - 1);
2315 if (rc)
2316 return rc;
2317 }
2318
2319 /* First time posting */
2320 for_all_rx_queues(adapter, rxo, i) {
2321 be_post_rx_frags(rxo, GFP_KERNEL);
2322 napi_enable(&rxo->rx_eq.napi);
2323 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002324 return 0;
2325}
2326
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002327static int be_open(struct net_device *netdev)
2328{
2329 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002330 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002331 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002332 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002333
Sathya Perla482c9e72011-06-29 23:33:17 +00002334 status = be_rx_queues_setup(adapter);
2335 if (status)
2336 goto err;
2337
Sathya Perla5fb379e2009-06-18 00:02:59 +00002338 napi_enable(&tx_eq->napi);
2339
2340 be_irq_register(adapter);
2341
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002342 if (!lancer_chip(adapter))
2343 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002344
2345 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002346 for_all_rx_queues(adapter, rxo, i) {
2347 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2348 be_cq_notify(adapter, rxo->cq.id, true, 0);
2349 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002350 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002351
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002352 /* Now that interrupts are on we can process async mcc */
2353 be_async_mcc_enable(adapter);
2354
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002355 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002356 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002357 if (status)
2358 goto err;
2359
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002360 status = be_cmd_set_flow_control(adapter,
2361 adapter->tx_fc, adapter->rx_fc);
2362 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002363 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002364 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002365
Sathya Perla889cd4b2010-05-30 23:33:45 +00002366 return 0;
2367err:
2368 be_close(adapter->netdev);
2369 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002370}
2371
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002372static int be_setup_wol(struct be_adapter *adapter, bool enable)
2373{
2374 struct be_dma_mem cmd;
2375 int status = 0;
2376 u8 mac[ETH_ALEN];
2377
2378 memset(mac, 0, ETH_ALEN);
2379
2380 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002381 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2382 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002383 if (cmd.va == NULL)
2384 return -1;
2385 memset(cmd.va, 0, cmd.size);
2386
2387 if (enable) {
2388 status = pci_write_config_dword(adapter->pdev,
2389 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2390 if (status) {
2391 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002392				"Could not enable Wake-on-LAN\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002393 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2394 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002395 return status;
2396 }
2397 status = be_cmd_enable_magic_wol(adapter,
2398 adapter->netdev->dev_addr, &cmd);
2399 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2400 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2401 } else {
2402 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2403 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2404 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2405 }
2406
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002407 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002408 return status;
2409}
2410
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002411/*
 2412 * Generate a seed MAC address from the PF MAC address using jhash.
 2413 * MAC addresses for the VFs are assigned incrementally, starting from the seed.
 2414 * These addresses are programmed into the ASIC by the PF; each VF driver
 2415 * queries its own MAC address during probe.
2416 */
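/* Illustrative example (addresses hypothetical): if the jhash-derived seed
 * is 02:00:c9:ab:cd:00, VF0 is programmed with ...:00, VF1 with ...:01,
 * and so on -- only mac[5] is incremented per VF below.
 */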
2417static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2418{
2419 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002420 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002421 u8 mac[ETH_ALEN];
2422
2423 be_vf_eth_addr_generate(adapter, mac);
2424
2425 for (vf = 0; vf < num_vfs; vf++) {
2426 status = be_cmd_pmac_add(adapter, mac,
2427 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002428 &adapter->vf_cfg[vf].vf_pmac_id,
2429 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002430 if (status)
2431 dev_err(&adapter->pdev->dev,
 2432				"MAC address add failed for VF %d\n", vf);
2433 else
2434 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2435
2436 mac[5] += 1;
2437 }
2438 return status;
2439}
2440
2441static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2442{
2443 u32 vf;
2444
2445 for (vf = 0; vf < num_vfs; vf++) {
2446 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2447 be_cmd_pmac_del(adapter,
2448 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002449 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002450 }
2451}
2452
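/* One-time function setup: create the PF interface (with RSS capability
 * when the FW reports it), one interface per VF when SR-IOV is enabled,
 * and then the TX, RX and MCC queues. Errors unwind in reverse order
 * through the labels at the bottom.
 */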
Sathya Perla5fb379e2009-06-18 00:02:59 +00002453static int be_setup(struct be_adapter *adapter)
2454{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002455 struct net_device *netdev = adapter->netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002456 u32 cap_flags, en_flags, vf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002457 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002458 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002459
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002460 be_cmd_req_native_mode(adapter);
2461
Padmanabh Ratnakarf21b5382011-03-07 03:09:36 +00002462 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2463 BE_IF_FLAGS_BROADCAST |
2464 BE_IF_FLAGS_MULTICAST;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002465
2466 if (be_physfn(adapter)) {
2467 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2468 BE_IF_FLAGS_PROMISCUOUS |
2469 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2470 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
Sathya Perla3abcded2010-10-03 22:12:27 -07002471
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002472 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002473 cap_flags |= BE_IF_FLAGS_RSS;
2474 en_flags |= BE_IF_FLAGS_RSS;
2475 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002476 }
Sathya Perla73d540f2009-10-14 20:20:42 +00002477
2478 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2479 netdev->dev_addr, false/* pmac_invalid */,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002480 &adapter->if_handle, &adapter->pmac_id, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002481 if (status != 0)
2482 goto do_none;
2483
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002484 if (be_physfn(adapter)) {
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002485 if (adapter->sriov_enabled) {
2486 while (vf < num_vfs) {
2487 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2488 BE_IF_FLAGS_BROADCAST;
2489 status = be_cmd_if_create(adapter, cap_flags,
2490 en_flags, mac, true,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002491 &adapter->vf_cfg[vf].vf_if_handle,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002492 NULL, vf+1);
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002493 if (status) {
2494 dev_err(&adapter->pdev->dev,
2495 "Interface Create failed for VF %d\n",
2496 vf);
2497 goto if_destroy;
2498 }
2499 adapter->vf_cfg[vf].vf_pmac_id =
2500 BE_INVALID_PMAC_ID;
2501 vf++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002502 }
Sarveshwar Bandi84e5b9f2010-05-27 16:28:15 -07002503 }
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002504 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002505 status = be_cmd_mac_addr_query(adapter, mac,
2506 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2507 if (!status) {
2508 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2509 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2510 }
2511 }
2512
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002513 status = be_tx_queues_create(adapter);
2514 if (status != 0)
2515 goto if_destroy;
2516
2517 status = be_rx_queues_create(adapter);
2518 if (status != 0)
2519 goto tx_qs_destroy;
2520
Sathya Perla2903dd62011-06-26 20:41:53 +00002521 /* Allow all priorities by default. A GRP5 evt may modify this */
2522 adapter->vlan_prio_bmap = 0xff;
2523
Sathya Perla5fb379e2009-06-18 00:02:59 +00002524 status = be_mcc_queues_create(adapter);
2525 if (status != 0)
2526 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002527
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002528 adapter->link_speed = -1;
2529
Sathya Perla04b71172011-09-27 13:30:27 -04002530 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002531
2532 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002533 return 0;
2534
Sathya Perla5fb379e2009-06-18 00:02:59 +00002535rx_qs_destroy:
2536 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002537tx_qs_destroy:
2538 be_tx_queues_destroy(adapter);
2539if_destroy:
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002540 if (be_physfn(adapter) && adapter->sriov_enabled)
2541 for (vf = 0; vf < num_vfs; vf++)
2542 if (adapter->vf_cfg[vf].vf_if_handle)
2543 be_cmd_if_destroy(adapter,
Ajit Khaparde658681f2011-02-11 13:34:46 +00002544 adapter->vf_cfg[vf].vf_if_handle,
2545 vf + 1);
2546 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002547do_none:
2548 return status;
2549}
2550
Sathya Perla5fb379e2009-06-18 00:02:59 +00002551static int be_clear(struct be_adapter *adapter)
2552{
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002553 int vf;
2554
Ajit Khapardec99ac3e2011-02-11 13:35:02 +00002555 if (be_physfn(adapter) && adapter->sriov_enabled)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002556 be_vf_eth_addr_rem(adapter);
2557
Sathya Perla1a8887d2009-08-17 00:58:41 +00002558 be_mcc_queues_destroy(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002559 be_rx_queues_destroy(adapter);
2560 be_tx_queues_destroy(adapter);
Padmanabh Ratnakar1f5db832011-04-03 01:54:39 +00002561 adapter->eq_next_idx = 0;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002562
Ajit Khaparde7ab8b0b2011-02-11 13:35:56 +00002563 if (be_physfn(adapter) && adapter->sriov_enabled)
2564 for (vf = 0; vf < num_vfs; vf++)
2565 if (adapter->vf_cfg[vf].vf_if_handle)
2566 be_cmd_if_destroy(adapter,
2567 adapter->vf_cfg[vf].vf_if_handle,
2568 vf + 1);
2569
Ajit Khaparde658681f2011-02-11 13:34:46 +00002570 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002571
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002572 adapter->be3_native = 0;
2573
Sathya Perla2243e2e2009-11-22 22:02:03 +00002574 /* tell fw we're done with firing cmds */
2575 be_cmd_fw_clean(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002576 return 0;
2577}
2578
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002579
Ajit Khaparde84517482009-09-04 03:12:16 +00002580#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002581static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002582 const u8 *p, u32 img_start, int image_size,
2583 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002584{
2585 u32 crc_offset;
2586 u8 flashed_crc[4];
2587 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002588
2589 crc_offset = hdr_size + img_start + image_size - 4;
2590
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002591 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002592
2593 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002594 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002595 if (status) {
2596 dev_err(&adapter->pdev->dev,
 2597			"could not get CRC from flash, not flashing redboot\n");
2598 return false;
2599 }
2600
 2601	/* update redboot only if the CRC does not match */
 2602	return memcmp(flashed_crc, p, 4) != 0;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002606}
2607
Sathya Perla306f1342011-08-02 19:57:45 +00002608static bool phy_flashing_required(struct be_adapter *adapter)
2609{
2610 int status = 0;
2611 struct be_phy_info phy_info;
2612
2613 status = be_cmd_get_phy_info(adapter, &phy_info);
2614 if (status)
2615 return false;
2616 if ((phy_info.phy_type == TN_8022) &&
2617 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2618 return true;
2619 }
2620 return false;
2621}
2622
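/* Flash each firmware component of a UFI image. The gen2/gen3 tables
 * below map a component type to its flash offset and maximum size. Each
 * component is written in 32KB chunks; intermediate chunks use a SAVE op
 * and the final chunk a FLASH op to commit. Redboot is written only when
 * its CRC differs from what is already in flash.
 */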
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002623static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002624 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002625 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00002627{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002628 int status = 0, i, filehdr_size = 0;
2629 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002630 int num_bytes;
2631 const u8 *p = fw->data;
2632 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002633 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002634 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002635
Sathya Perla306f1342011-08-02 19:57:45 +00002636 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002637 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2638 FLASH_IMAGE_MAX_SIZE_g3},
2639 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2640 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2641 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2642 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2643 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2644 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2645 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2646 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2647 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2648 FLASH_IMAGE_MAX_SIZE_g3},
2649 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2650 FLASH_IMAGE_MAX_SIZE_g3},
2651 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002652 FLASH_IMAGE_MAX_SIZE_g3},
2653 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002654 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2655 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2656 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002657 };
Joe Perches215faf92010-12-21 02:16:10 -08002658 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002659 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2660 FLASH_IMAGE_MAX_SIZE_g2},
2661 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2662 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2663 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2664 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2665 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2666 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2667 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2668 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2669 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2670 FLASH_IMAGE_MAX_SIZE_g2},
2671 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2672 FLASH_IMAGE_MAX_SIZE_g2},
2673 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2674 FLASH_IMAGE_MAX_SIZE_g2}
2675 };
2676
2677 if (adapter->generation == BE_GEN3) {
2678 pflashcomp = gen3_flash_types;
2679 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002680 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002681 } else {
2682 pflashcomp = gen2_flash_types;
2683 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002684 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002685 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002686 for (i = 0; i < num_comp; i++) {
2687 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2688 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2689 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002690 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2691 if (!phy_flashing_required(adapter))
2692 continue;
2693 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002694 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2695 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002696 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2697 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002698 continue;
2699 p = fw->data;
2700 p += filehdr_size + pflashcomp[i].offset
2701 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002702 if (p + pflashcomp[i].size > fw->data + fw->size)
2703 return -1;
2704 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002705 while (total_bytes) {
2706 if (total_bytes > 32*1024)
2707 num_bytes = 32*1024;
2708 else
2709 num_bytes = total_bytes;
2710 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002711 if (!total_bytes) {
2712 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2713 flash_op = FLASHROM_OPER_PHY_FLASH;
2714 else
2715 flash_op = FLASHROM_OPER_FLASH;
2716 } else {
2717 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2718 flash_op = FLASHROM_OPER_PHY_SAVE;
2719 else
2720 flash_op = FLASHROM_OPER_SAVE;
2721 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002722 memcpy(req->params.data_buf, p, num_bytes);
2723 p += num_bytes;
2724 status = be_cmd_write_flashrom(adapter, flash_cmd,
2725 pflashcomp[i].optype, flash_op, num_bytes);
2726 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002727 if ((status == ILLEGAL_IOCTL_REQ) &&
2728 (pflashcomp[i].optype ==
2729 IMG_TYPE_PHY_FW))
2730 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002731 dev_err(&adapter->pdev->dev,
2732 "cmd to write to flash rom failed.\n");
2733 return -1;
2734 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002735 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002736 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002737 return 0;
2738}
2739
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002740static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2741{
2742 if (fhdr == NULL)
2743 return 0;
2744 if (fhdr->build[0] == '3')
2745 return BE_GEN3;
2746 else if (fhdr->build[0] == '2')
2747 return BE_GEN2;
2748 else
2749 return 0;
2750}
2751
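/* Lancer firmware download: the image is copied into a DMA buffer in
 * 32KB chunks and written to the "/prg" object via write_object commands;
 * a final zero-length write at the end offset commits the image.
 */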
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002752static int lancer_fw_download(struct be_adapter *adapter,
2753 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002754{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002755#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2756#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2757 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002758 const u8 *data_ptr = NULL;
2759 u8 *dest_image_ptr = NULL;
2760 size_t image_size = 0;
2761 u32 chunk_size = 0;
2762 u32 data_written = 0;
2763 u32 offset = 0;
2764 int status = 0;
2765 u8 add_status = 0;
2766
2767 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2768 dev_err(&adapter->pdev->dev,
2769 "FW Image not properly aligned. "
 2770			"Length must be 4-byte aligned.\n");
2771 status = -EINVAL;
2772 goto lancer_fw_exit;
2773 }
2774
2775 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2776 + LANCER_FW_DOWNLOAD_CHUNK;
2777 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2778 &flash_cmd.dma, GFP_KERNEL);
2779 if (!flash_cmd.va) {
2780 status = -ENOMEM;
2781 dev_err(&adapter->pdev->dev,
2782 "Memory allocation failure while flashing\n");
2783 goto lancer_fw_exit;
2784 }
2785
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002786 dest_image_ptr = flash_cmd.va +
2787 sizeof(struct lancer_cmd_req_write_object);
2788 image_size = fw->size;
2789 data_ptr = fw->data;
2790
2791 while (image_size) {
2792 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2793
2794 /* Copy the image chunk content. */
2795 memcpy(dest_image_ptr, data_ptr, chunk_size);
2796
2797 status = lancer_cmd_write_object(adapter, &flash_cmd,
2798 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2799 &data_written, &add_status);
2800
2801 if (status)
2802 break;
2803
2804 offset += data_written;
2805 data_ptr += data_written;
2806 image_size -= data_written;
2807 }
2808
2809 if (!status) {
2810 /* Commit the FW written */
2811 status = lancer_cmd_write_object(adapter, &flash_cmd,
2812 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2813 &data_written, &add_status);
2814 }
2815
2816 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2817 flash_cmd.dma);
2818 if (status) {
2819 dev_err(&adapter->pdev->dev,
2820 "Firmware load error. "
2821 "Status code: 0x%x Additional Status: 0x%x\n",
2822 status, add_status);
2823 goto lancer_fw_exit;
2824 }
2825
2826 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2827lancer_fw_exit:
2828 return status;
2829}
2830
 2831static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2832{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002833 struct flash_file_hdr_g2 *fhdr;
2834 struct flash_file_hdr_g3 *fhdr3;
2835 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002836 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002837 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002838 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002839
2840 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002841 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002842
Ajit Khaparde84517482009-09-04 03:12:16 +00002843 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002844 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2845 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002846 if (!flash_cmd.va) {
2847 status = -ENOMEM;
2848 dev_err(&adapter->pdev->dev,
2849 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002850 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002851 }
2852
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002853 if ((adapter->generation == BE_GEN3) &&
2854 (get_ufigen_type(fhdr) == BE_GEN3)) {
2855 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002856 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2857 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002858 img_hdr_ptr = (struct image_hdr *) (fw->data +
2859 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002860 i * sizeof(struct image_hdr)));
2861 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2862 status = be_flash_data(adapter, fw, &flash_cmd,
2863 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002864 }
2865 } else if ((adapter->generation == BE_GEN2) &&
2866 (get_ufigen_type(fhdr) == BE_GEN2)) {
2867 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2868 } else {
2869 dev_err(&adapter->pdev->dev,
2870 "UFI and Interface are not compatible for flashing\n");
2871 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002872 }
2873
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002874 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2875 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002876 if (status) {
2877 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002878 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002879 }
2880
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002881 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002882
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002883be_fw_exit:
2884 return status;
2885}
2886
2887int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2888{
2889 const struct firmware *fw;
2890 int status;
2891
2892 if (!netif_running(adapter->netdev)) {
2893 dev_err(&adapter->pdev->dev,
2894 "Firmware load not allowed (interface is down)\n");
2895 return -1;
2896 }
2897
2898 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2899 if (status)
2900 goto fw_exit;
2901
2902 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2903
2904 if (lancer_chip(adapter))
2905 status = lancer_fw_download(adapter, fw);
2906 else
2907 status = be_fw_download(adapter, fw);
2908
Ajit Khaparde84517482009-09-04 03:12:16 +00002909fw_exit:
2910 release_firmware(fw);
2911 return status;
2912}
2913
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002914static struct net_device_ops be_netdev_ops = {
2915 .ndo_open = be_open,
2916 .ndo_stop = be_close,
2917 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002918 .ndo_set_rx_mode = be_set_multicast_list,
2919 .ndo_set_mac_address = be_mac_addr_set,
2920 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00002921 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002922 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002923 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2924 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002925 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002926 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002927 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002928 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002929};
2930
2931static void be_netdev_init(struct net_device *netdev)
2932{
2933 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002934 struct be_rx_obj *rxo;
2935 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002936
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002937 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002938 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2939 NETIF_F_HW_VLAN_TX;
2940 if (be_multi_rxq(adapter))
2941 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002942
2943 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00002944 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00002945
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07002946 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00002947 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00002948
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002949 netdev->flags |= IFF_MULTICAST;
2950
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002951 /* Default settings for Rx and Tx flow control */
2952 adapter->rx_fc = true;
2953 adapter->tx_fc = true;
2954
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002955 netif_set_gso_max_size(netdev, 65535);
2956
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002957 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2958
2959 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2960
Sathya Perla3abcded2010-10-03 22:12:27 -07002961 for_all_rx_queues(adapter, rxo, i)
2962 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2963 BE_NAPI_WEIGHT);
2964
Sathya Perla5fb379e2009-06-18 00:02:59 +00002965 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002966 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002967}
2968
2969static void be_unmap_pci_bars(struct be_adapter *adapter)
2970{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002971 if (adapter->csr)
2972 iounmap(adapter->csr);
2973 if (adapter->db)
2974 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002975}
2976
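/* Map the PCI BARs this function needs. Lancer exposes everything through
 * BAR 0. On BE2/BE3 the PF also maps the CSR space from BAR 2; the
 * doorbell BAR is 4 except for BE3 VFs, which use BAR 0.
 */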
2977static int be_map_pci_bars(struct be_adapter *adapter)
2978{
2979 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00002980 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002981
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002982 if (lancer_chip(adapter)) {
2983 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2984 pci_resource_len(adapter->pdev, 0));
2985 if (addr == NULL)
2986 return -ENOMEM;
2987 adapter->db = addr;
2988 return 0;
2989 }
2990
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002991 if (be_physfn(adapter)) {
2992 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2993 pci_resource_len(adapter->pdev, 2));
2994 if (addr == NULL)
2995 return -ENOMEM;
2996 adapter->csr = addr;
2997 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002998
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002999 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003000 db_reg = 4;
3001 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003002 if (be_physfn(adapter))
3003 db_reg = 4;
3004 else
3005 db_reg = 0;
3006 }
3007 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3008 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003009 if (addr == NULL)
3010 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003011 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003012
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003013 return 0;
3014pci_map_err:
3015 be_unmap_pci_bars(adapter);
3016 return -ENOMEM;
3017}
3018
3019
3020static void be_ctrl_cleanup(struct be_adapter *adapter)
3021{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003022 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003023
3024 be_unmap_pci_bars(adapter);
3025
3026 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003027 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3028 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003029
Sathya Perla5b8821b2011-08-02 19:57:44 +00003030 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003031 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003032 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3033 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003034}
3035
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003036static int be_ctrl_init(struct be_adapter *adapter)
3037{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003038 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3039 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003040 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003041 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003042
3043 status = be_map_pci_bars(adapter);
3044 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003045 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003046
3047 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003048 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3049 mbox_mem_alloc->size,
3050 &mbox_mem_alloc->dma,
3051 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003052 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003053 status = -ENOMEM;
3054 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003055 }
3056 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3057 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3058 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3059 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003060
Sathya Perla5b8821b2011-08-02 19:57:44 +00003061 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3062 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3063 &rx_filter->dma, GFP_KERNEL);
3064 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003065 status = -ENOMEM;
3066 goto free_mbox;
3067 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003068 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003069
Ivan Vecera29849612010-12-14 05:43:19 +00003070 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003071 spin_lock_init(&adapter->mcc_lock);
3072 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003073
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003074 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003075 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003076 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003077
3078free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003079 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3080 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003081
3082unmap_pci_bars:
3083 be_unmap_pci_bars(adapter);
3084
3085done:
3086 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003087}
3088
3089static void be_stats_cleanup(struct be_adapter *adapter)
3090{
Sathya Perla3abcded2010-10-03 22:12:27 -07003091 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003092
3093 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003094 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3095 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003096}
3097
3098static int be_stats_init(struct be_adapter *adapter)
3099{
Sathya Perla3abcded2010-10-03 22:12:27 -07003100 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003101
Selvin Xavier005d5692011-05-16 07:36:35 +00003102 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003103 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003104 } else {
3105 if (lancer_chip(adapter))
3106 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3107 else
3108 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3109 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003110 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3111 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003112 if (cmd->va == NULL)
3113 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003114 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003115 return 0;
3116}
3117
3118static void __devexit be_remove(struct pci_dev *pdev)
3119{
3120 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003121
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003122 if (!adapter)
3123 return;
3124
Somnath Koturf203af72010-10-25 23:01:03 +00003125 cancel_delayed_work_sync(&adapter->work);
3126
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003127 unregister_netdev(adapter->netdev);
3128
Sathya Perla5fb379e2009-06-18 00:02:59 +00003129 be_clear(adapter);
3130
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003131 be_stats_cleanup(adapter);
3132
3133 be_ctrl_cleanup(adapter);
3134
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003135 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003136 be_sriov_disable(adapter);
3137
Sathya Perla8d56ff12009-11-22 22:02:26 +00003138 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003139
3140 pci_set_drvdata(pdev, NULL);
3141 pci_release_regions(pdev);
3142 pci_disable_device(pdev);
3143
3144 free_netdev(adapter->netdev);
3145}
3146
Sathya Perla2243e2e2009-11-22 22:02:03 +00003147static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003148{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003149 int status;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003150 u8 mac[ETH_ALEN];
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003151
Sathya Perla3abcded2010-10-03 22:12:27 -07003152 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3153 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003154 if (status)
3155 return status;
3156
3157 memset(mac, 0, ETH_ALEN);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003158
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003159	/* A default permanent address is given to each VF for Lancer */
3160 if (be_physfn(adapter) || lancer_chip(adapter)) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003161 status = be_cmd_mac_addr_query(adapter, mac,
Sathya Perla2243e2e2009-11-22 22:02:03 +00003162 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003163
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003164 if (status)
3165 return status;
Ajit Khapardeca9e4982009-11-29 17:56:26 +00003166
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003167 if (!is_valid_ether_addr(mac))
3168 return -EADDRNOTAVAIL;
3169
3170 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3171 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3172 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003173
Ajit Khaparde3486be22010-07-23 02:04:54 +00003174	if (adapter->function_mode & 0x400) /* FLEX10 mode */
Ajit Khaparde82903e42010-02-09 01:34:57 +00003175 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3176 else
3177 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3178
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003179 status = be_cmd_get_cntl_attributes(adapter);
3180 if (status)
3181 return status;
3182
Sathya Perla3c8def92011-06-12 20:01:58 +00003183 if ((num_vfs && adapter->sriov_enabled) ||
3184 (adapter->function_mode & 0x400) ||
3185 lancer_chip(adapter) || !be_physfn(adapter)) {
3186 adapter->num_tx_qs = 1;
3187 netif_set_real_num_tx_queues(adapter->netdev,
3188 adapter->num_tx_qs);
3189 } else {
3190 adapter->num_tx_qs = MAX_TX_QS;
3191 }
3192
Sathya Perla2243e2e2009-11-22 22:02:03 +00003193 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003194}
3195
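/* Derive the adapter generation from the PCI device ID. For OC_DEVICE_ID3/
 * OC_DEVICE_ID4 the SLI_INTF register is also validated and the SLI family
 * recorded (it is what lancer_chip() later checks).
 */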
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003196static int be_dev_family_check(struct be_adapter *adapter)
3197{
3198 struct pci_dev *pdev = adapter->pdev;
3199 u32 sli_intf = 0, if_type;
3200
3201 switch (pdev->device) {
3202 case BE_DEVICE_ID1:
3203 case OC_DEVICE_ID1:
3204 adapter->generation = BE_GEN2;
3205 break;
3206 case BE_DEVICE_ID2:
3207 case OC_DEVICE_ID2:
3208 adapter->generation = BE_GEN3;
3209 break;
3210 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003211 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003212 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3213 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3214 SLI_INTF_IF_TYPE_SHIFT;
3215
3216 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3217 if_type != 0x02) {
3218 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3219 return -EINVAL;
3220 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003221 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3222 SLI_INTF_FAMILY_SHIFT);
3223 adapter->generation = BE_GEN3;
3224 break;
3225 default:
3226 adapter->generation = 0;
3227 }
3228 return 0;
3229}
3230
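/* Poll SLIPORT_STATUS until the ready bit is set: up to 500 iterations of
 * 20ms each, i.e. roughly 10 seconds before giving up.
 */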
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003231static int lancer_wait_ready(struct be_adapter *adapter)
3232{
3233#define SLIPORT_READY_TIMEOUT 500
3234 u32 sliport_status;
3235 int status = 0, i;
3236
3237 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3238 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3239 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3240 break;
3241
3242 msleep(20);
3243 }
3244
3245 if (i == SLIPORT_READY_TIMEOUT)
3246 status = -1;
3247
3248 return status;
3249}
3250
3251static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3252{
3253 int status;
3254 u32 sliport_status, err, reset_needed;
3255 status = lancer_wait_ready(adapter);
3256 if (!status) {
3257 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3258 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3259 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3260 if (err && reset_needed) {
3261 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3262 adapter->db + SLIPORT_CONTROL_OFFSET);
3263
3264 /* check adapter has corrected the error */
3265 status = lancer_wait_ready(adapter);
3266 sliport_status = ioread32(adapter->db +
3267 SLIPORT_STATUS_OFFSET);
3268 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3269 SLIPORT_STATUS_RN_MASK);
3270 if (status || sliport_status)
3271 status = -1;
3272 } else if (err || reset_needed) {
3273 status = -1;
3274 }
3275 }
3276 return status;
3277}
3278
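/* PCI probe: map BARs, sync with FW readiness, reset the function, set up
 * stats DMA and adapter config, create queues via be_setup() and register
 * the netdev. When SR-IOV is enabled, VF MAC addresses are provisioned and
 * the per-VF link speed recorded at the end.
 */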
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003279static int __devinit be_probe(struct pci_dev *pdev,
3280 const struct pci_device_id *pdev_id)
3281{
3282 int status = 0;
3283 struct be_adapter *adapter;
3284 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003285
3286 status = pci_enable_device(pdev);
3287 if (status)
3288 goto do_none;
3289
3290 status = pci_request_regions(pdev, DRV_NAME);
3291 if (status)
3292 goto disable_dev;
3293 pci_set_master(pdev);
3294
Sathya Perla3c8def92011-06-12 20:01:58 +00003295 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003296 if (netdev == NULL) {
3297 status = -ENOMEM;
3298 goto rel_reg;
3299 }
3300 adapter = netdev_priv(netdev);
3301 adapter->pdev = pdev;
3302 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003303
3304 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003305 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003306 goto free_netdev;
3307
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003308 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003309 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003310
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003311 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003312 if (!status) {
3313 netdev->features |= NETIF_F_HIGHDMA;
3314 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003315 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003316 if (status) {
3317 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3318 goto free_netdev;
3319 }
3320 }
3321
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003322 be_sriov_enable(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003323 if (adapter->sriov_enabled) {
3324 adapter->vf_cfg = kcalloc(num_vfs,
3325 sizeof(struct be_vf_cfg), GFP_KERNEL);
3326
3327 if (!adapter->vf_cfg)
3328 goto free_netdev;
3329 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003330
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003331 status = be_ctrl_init(adapter);
3332 if (status)
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003333 goto free_vf_cfg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003334
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003335 if (lancer_chip(adapter)) {
3336 status = lancer_test_and_set_rdy_state(adapter);
3337 if (status) {
 3338			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003339 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003340 }
3341 }
3342
Sathya Perla2243e2e2009-11-22 22:02:03 +00003343 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003344 if (be_physfn(adapter)) {
3345 status = be_cmd_POST(adapter);
3346 if (status)
3347 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003348 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003349
3350 /* tell fw we're ready to fire cmds */
3351 status = be_cmd_fw_init(adapter);
3352 if (status)
3353 goto ctrl_clean;
3354
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003355 status = be_cmd_reset_function(adapter);
3356 if (status)
3357 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003358
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003359 status = be_stats_init(adapter);
3360 if (status)
3361 goto ctrl_clean;
3362
Sathya Perla2243e2e2009-11-22 22:02:03 +00003363 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003364 if (status)
3365 goto stats_clean;
3366
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003367 /* The INTR bit may be set in the card when probed by a kdump kernel
3368 * after a crash.
3369 */
3370 if (!lancer_chip(adapter))
3371 be_intr_set(adapter, false);
3372
Sathya Perla3abcded2010-10-03 22:12:27 -07003373 be_msix_enable(adapter);
3374
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003375 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003376
Sathya Perla5fb379e2009-06-18 00:02:59 +00003377 status = be_setup(adapter);
3378 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003379 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003380
Sathya Perla3abcded2010-10-03 22:12:27 -07003381 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003382 status = register_netdev(netdev);
3383 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003384 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003385
Ajit Khapardee6319362011-02-11 13:35:41 +00003386 if (be_physfn(adapter) && adapter->sriov_enabled) {
Ajit Khaparded0381c42011-04-19 12:11:55 +00003387 u8 mac_speed;
Ajit Khaparded0381c42011-04-19 12:11:55 +00003388 u16 vf, lnk_speed;
3389
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003390 if (!lancer_chip(adapter)) {
3391 status = be_vf_eth_addr_config(adapter);
3392 if (status)
3393 goto unreg_netdev;
3394 }
Ajit Khaparded0381c42011-04-19 12:11:55 +00003395
3396 for (vf = 0; vf < num_vfs; vf++) {
Sathya Perlaea172a02011-08-02 19:57:42 +00003397 status = be_cmd_link_status_query(adapter, &mac_speed,
3398 &lnk_speed, vf + 1);
Ajit Khaparded0381c42011-04-19 12:11:55 +00003399 if (!status)
3400 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3401 else
3402 goto unreg_netdev;
3403 }
Ajit Khapardee6319362011-02-11 13:35:41 +00003404 }
3405
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003406 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003407
Somnath Koturf203af72010-10-25 23:01:03 +00003408 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003409 return 0;
3410
Ajit Khapardee6319362011-02-11 13:35:41 +00003411unreg_netdev:
3412 unregister_netdev(netdev);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003413unsetup:
3414 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003415msix_disable:
3416 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003417stats_clean:
3418 be_stats_cleanup(adapter);
3419ctrl_clean:
3420 be_ctrl_cleanup(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003421free_vf_cfg:
3422 kfree(adapter->vf_cfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003423free_netdev:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003424 be_sriov_disable(adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003425 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003426 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003427rel_reg:
3428 pci_release_regions(pdev);
3429disable_dev:
3430 pci_disable_device(pdev);
3431do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003432 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003433 return status;
3434}
3435
3436static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3437{
3438 struct be_adapter *adapter = pci_get_drvdata(pdev);
3439 struct net_device *netdev = adapter->netdev;
3440
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003441 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003442 if (adapter->wol)
3443 be_setup_wol(adapter, true);
3444
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003445 netif_device_detach(netdev);
3446 if (netif_running(netdev)) {
3447 rtnl_lock();
3448 be_close(netdev);
3449 rtnl_unlock();
3450 }
Ajit Khaparde9e90c962009-11-06 02:06:59 +00003451 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003452 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003453
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003454 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003455 pci_save_state(pdev);
3456 pci_disable_device(pdev);
3457 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3458 return 0;
3459}
3460
3461static int be_resume(struct pci_dev *pdev)
3462{
3463 int status = 0;
3464 struct be_adapter *adapter = pci_get_drvdata(pdev);
3465 struct net_device *netdev = adapter->netdev;
3466
3467 netif_device_detach(netdev);
3468
3469 status = pci_enable_device(pdev);
3470 if (status)
3471 return status;
3472
3473 pci_set_power_state(pdev, 0);
3474 pci_restore_state(pdev);
3475
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003476 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003477 /* tell fw we're ready to fire cmds */
3478 status = be_cmd_fw_init(adapter);
3479 if (status)
3480 return status;
3481
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003482 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003483 if (netif_running(netdev)) {
3484 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003485 be_open(netdev);
3486 rtnl_unlock();
3487 }
3488 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003489
3490 if (adapter->wol)
3491 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003492
3493 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003494 return 0;
3495}
3496
Sathya Perla82456b02010-02-17 01:35:37 +00003497/*
3498 * An FLR will stop BE from DMAing any data.
3499 */
3500static void be_shutdown(struct pci_dev *pdev)
3501{
3502 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003503
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003504 if (!adapter)
3505 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003506
Sathya Perla0f4a6822011-03-21 20:49:28 +00003507 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003508
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003509 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003510
Sathya Perla82456b02010-02-17 01:35:37 +00003511 if (adapter->wol)
3512 be_setup_wol(adapter, true);
3513
Ajit Khaparde57841862011-04-06 18:08:43 +00003514 be_cmd_reset_function(adapter);
3515
Sathya Perla82456b02010-02-17 01:35:37 +00003516 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003517}
3518
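/* PCI EEH hooks: on error, detach the netdev and tear everything down;
 * on slot reset, re-enable the device and wait for POST; on resume,
 * re-init FW cmds, rebuild via be_setup() and re-attach the netdev.
 */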
Sathya Perlacf588472010-02-14 21:22:01 +00003519static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3520 pci_channel_state_t state)
3521{
3522 struct be_adapter *adapter = pci_get_drvdata(pdev);
3523 struct net_device *netdev = adapter->netdev;
3524
3525 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3526
3527 adapter->eeh_err = true;
3528
3529 netif_device_detach(netdev);
3530
3531 if (netif_running(netdev)) {
3532 rtnl_lock();
3533 be_close(netdev);
3534 rtnl_unlock();
3535 }
3536 be_clear(adapter);
3537
3538 if (state == pci_channel_io_perm_failure)
3539 return PCI_ERS_RESULT_DISCONNECT;
3540
3541 pci_disable_device(pdev);
3542
3543 return PCI_ERS_RESULT_NEED_RESET;
3544}
3545
3546static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3547{
3548 struct be_adapter *adapter = pci_get_drvdata(pdev);
3549 int status;
3550
3551 dev_info(&adapter->pdev->dev, "EEH reset\n");
3552 adapter->eeh_err = false;
3553
3554 status = pci_enable_device(pdev);
3555 if (status)
3556 return PCI_ERS_RESULT_DISCONNECT;
3557
3558 pci_set_master(pdev);
3559 pci_set_power_state(pdev, 0);
3560 pci_restore_state(pdev);
3561
3562 /* Check if card is ok and fw is ready */
3563 status = be_cmd_POST(adapter);
3564 if (status)
3565 return PCI_ERS_RESULT_DISCONNECT;
3566
3567 return PCI_ERS_RESULT_RECOVERED;
3568}
3569
3570static void be_eeh_resume(struct pci_dev *pdev)
3571{
3572 int status = 0;
3573 struct be_adapter *adapter = pci_get_drvdata(pdev);
3574 struct net_device *netdev = adapter->netdev;
3575
3576 dev_info(&adapter->pdev->dev, "EEH resume\n");
3577
3578 pci_save_state(pdev);
3579
3580 /* tell fw we're ready to fire cmds */
3581 status = be_cmd_fw_init(adapter);
3582 if (status)
3583 goto err;
3584
3585 status = be_setup(adapter);
3586 if (status)
3587 goto err;
3588
3589 if (netif_running(netdev)) {
3590 status = be_open(netdev);
3591 if (status)
3592 goto err;
3593 }
3594 netif_device_attach(netdev);
3595 return;
3596err:
3597 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003598}
3599
3600static struct pci_error_handlers be_eeh_handlers = {
3601 .error_detected = be_eeh_err_detected,
3602 .slot_reset = be_eeh_reset,
3603 .resume = be_eeh_resume,
3604};
3605
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003606static struct pci_driver be_driver = {
3607 .name = DRV_NAME,
3608 .id_table = be_dev_ids,
3609 .probe = be_probe,
3610 .remove = be_remove,
3611 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003612 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003613 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003614 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003615};
3616
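/* Typical module load (illustrative; module name per DRV_NAME, usually
 * be2net), using the parameters declared at the top of this file:
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * Any rx_frag_size other than 2048/4096/8192 is overridden to 2048 below.
 */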
3617static int __init be_init_module(void)
3618{
Joe Perches8e95a202009-12-03 07:58:21 +00003619 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3620 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003621 printk(KERN_WARNING DRV_NAME
 3622			": Module param rx_frag_size must be 2048/4096/8192."
 3623			" Using 2048\n");
3624 rx_frag_size = 2048;
3625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003626
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003627 return pci_register_driver(&be_driver);
3628}
3629module_init(be_init_module);
3630
3631static void __exit be_exit_module(void)
3632{
3633 pci_unregister_driver(&be_driver);
3634}
3635module_exit(be_exit_module);