blob: 9b5304a653f348d6de93973002cdf801115e1395 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070045 { 0 }
46};
47MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000048/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070049static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000050 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
82};
83/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070084static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000085 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700108 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
117};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700118
Sathya Perla752961a2011-10-24 02:45:03 +0000119/* Is BE in a multi-channel mode */
120static inline bool be_is_mc(struct be_adapter *adapter) {
121 return (adapter->function_mode & FLEX10_MODE ||
122 adapter->function_mode & VNIC_MODE ||
123 adapter->function_mode & UMC_ENABLED);
124}
125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700132}
133
134static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
135 u16 len, u16 entry_size)
136{
137 struct be_dma_mem *mem = &q->dma_mem;
138
139 memset(q, 0, sizeof(*q));
140 q->len = len;
141 q->entry_size = entry_size;
142 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000143 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
144 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700145 if (!mem->va)
146 return -1;
147 memset(mem->va, 0, mem->size);
148 return 0;
149}
150
Sathya Perla8788fdc2009-07-27 22:52:03 +0000151static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152{
Sathya Perladb3ea782011-08-22 19:41:52 +0000153 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000154
Sathya Perlacf588472010-02-14 21:22:01 +0000155 if (adapter->eeh_err)
156 return;
157
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
159 &reg);
160 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
161
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168
Sathya Perladb3ea782011-08-22 19:41:52 +0000169 pci_write_config_dword(adapter->pdev,
170 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171}
172
Sathya Perla8788fdc2009-07-27 22:52:03 +0000173static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174{
175 u32 val = 0;
176 val |= qid & DB_RQ_RING_ID_MASK;
177 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000178
179 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000180 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700181}
182
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184{
185 u32 val = 0;
186 val |= qid & DB_TXULP_RING_ID_MASK;
187 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000188
189 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191}
192
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194 bool arm, bool clear_int, u16 num_popped)
195{
196 u32 val = 0;
197 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000198 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
199 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000200
201 if (adapter->eeh_err)
202 return;
203
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700204 if (arm)
205 val |= 1 << DB_EQ_REARM_SHIFT;
206 if (clear_int)
207 val |= 1 << DB_EQ_CLR_SHIFT;
208 val |= 1 << DB_EQ_EVNT_SHIFT;
209 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000210 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214{
215 u32 val = 0;
216 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000217 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
218 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000219
220 if (adapter->eeh_err)
221 return;
222
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700223 if (arm)
224 val |= 1 << DB_CQ_REARM_SHIFT;
225 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000226 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227}
228
/* ndo_set_mac_address handler.
 * Programs a new unicast MAC into the interface via FW commands.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or the
 * FW-command error status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the current pmac entry so it can be deleted after the
	 * new one has been successfully added */
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Query the MAC currently programmed on the interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* Only reprogram if the address actually changed; add the new
	 * entry BEFORE deleting the old one so the interface is never
	 * left without a programmed MAC */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
260
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000261static void populate_be2_stats(struct be_adapter *adapter)
262{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000263 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
264 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
265 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000266 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000267 &rxf_stats->port[adapter->port_num];
268 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000269
Sathya Perlaac124ff2011-07-25 19:10:14 +0000270 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000271 drvs->rx_pause_frames = port_stats->rx_pause_frames;
272 drvs->rx_crc_errors = port_stats->rx_crc_errors;
273 drvs->rx_control_frames = port_stats->rx_control_frames;
274 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
275 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
276 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
277 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
278 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
279 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
280 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
281 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
282 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
283 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
284 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000285 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000286 drvs->rx_dropped_header_too_small =
287 port_stats->rx_dropped_header_too_small;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000288 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000289 drvs->rx_alignment_symbol_errors =
290 port_stats->rx_alignment_symbol_errors;
291
292 drvs->tx_pauseframes = port_stats->tx_pauseframes;
293 drvs->tx_controlframes = port_stats->tx_controlframes;
294
295 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000296 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000297 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000298 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000299 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
300 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
301 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
302 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
303 drvs->forwarded_packets = rxf_stats->forwarded_packets;
304 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000305 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
306 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000307 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
308}
309
310static void populate_be3_stats(struct be_adapter *adapter)
311{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000312 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
313 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
314 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000315 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000316 &rxf_stats->port[adapter->port_num];
317 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000318
Sathya Perlaac124ff2011-07-25 19:10:14 +0000319 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000320 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
321 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000322 drvs->rx_pause_frames = port_stats->rx_pause_frames;
323 drvs->rx_crc_errors = port_stats->rx_crc_errors;
324 drvs->rx_control_frames = port_stats->rx_control_frames;
325 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
326 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
327 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
328 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
329 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
330 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
331 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
332 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
333 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
334 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
335 drvs->rx_dropped_header_too_small =
336 port_stats->rx_dropped_header_too_small;
337 drvs->rx_input_fifo_overflow_drop =
338 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000339 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000340 drvs->rx_alignment_symbol_errors =
341 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000342 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000343 drvs->tx_pauseframes = port_stats->tx_pauseframes;
344 drvs->tx_controlframes = port_stats->tx_controlframes;
345 drvs->jabber_events = port_stats->jabber_events;
346 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
347 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
348 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
349 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
350 drvs->forwarded_packets = rxf_stats->forwarded_packets;
351 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000352 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
353 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
355}
356
/* Copy the Lancer per-physical-port FW stats into the driver's generic
 * stats struct. Lancer counters are 64-bit (_lo/_hi pairs); only the low
 * words are consumed here. Stats arrive little-endian; convert first.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): Lancer appears to expose a single rx fifo-overflow
	 * counter; both driver-side fifo drop stats are fed from it below —
	 * confirm against the Lancer stats spec */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394
Sathya Perla09c1c682011-08-22 19:41:53 +0000395static void accumulate_16bit_val(u32 *acc, u16 val)
396{
397#define lo(x) (x & 0xFFFF)
398#define hi(x) (x & 0xFFFF0000)
399 bool wrapped = val < lo(*acc);
400 u32 newacc = hi(*acc) + val;
401
402 if (wrapped)
403 newacc += 65536;
404 ACCESS_ONCE(*acc) = newacc;
405}
406
/* Parse the FW stats response into adapter->drv_stats, dispatching on
 * chip generation: Lancer and BE3 use the GEN3 paths, everything else
 * takes the BE2 (v0) path. Also accumulates the per-RX-queue
 * drops_no_fragments counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
431
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the
 * FW-derived error counters into @stats. Per-queue 64-bit counters are
 * read under u64_stats seqcount retry loops so 32-bit hosts never see
 * torn values. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
497
Sathya Perlaea172a02011-08-02 19:57:42 +0000498void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700499{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500 struct net_device *netdev = adapter->netdev;
501
Sathya Perlaea172a02011-08-02 19:57:42 +0000502 /* when link status changes, link speed must be re-queried from card */
503 adapter->link_speed = -1;
504 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
505 netif_carrier_on(netdev);
506 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
507 } else {
508 netif_carrier_off(netdev);
509 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700510 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511}
512
Sathya Perla3c8def92011-06-12 20:01:58 +0000513static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000514 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perla3c8def92011-06-12 20:01:58 +0000516 struct be_tx_stats *stats = tx_stats(txo);
517
Sathya Perlaab1594e2011-07-25 19:10:15 +0000518 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000519 stats->tx_reqs++;
520 stats->tx_wrbs += wrb_cnt;
521 stats->tx_bytes += copied;
522 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700526}
527
528/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000529static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
530 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700532 int cnt = (skb->len > skb->data_len);
533
534 cnt += skb_shinfo(skb)->nr_frags;
535
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700536 /* to account for hdr wrb */
537 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000538 if (lancer_chip(adapter) || !(cnt & 1)) {
539 *dummy = false;
540 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541 /* add a dummy to make it an even num */
542 cnt++;
543 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000544 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700545 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
546 return cnt;
547}
548
549static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
550{
551 wrb->frag_pa_hi = upper_32_bits(addr);
552 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
553 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
554}
555
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000556static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
557 struct sk_buff *skb)
558{
559 u8 vlan_prio;
560 u16 vlan_tag;
561
562 vlan_tag = vlan_tx_tag_get(skb);
563 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
564 /* If vlan priority provided by OS is NOT in available bmap */
565 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
566 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
567 adapter->recommended_prio;
568
569 return vlan_tag;
570}
571
/* Populate the TX header wrb for an skb: checksum/LSO offload flags,
 * vlan tag, total wrb count and payload length, via AMAP bit-field
 * accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag is not set for Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 family needs explicit checksum flags even in
		 * the GSO path — presumably a silicon workaround; confirm
		 * against the SLI spec */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* stack requested HW checksum: pick the L4-specific flag */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
615
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000616static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000617 bool unmap_single)
618{
619 dma_addr_t dma;
620
621 be_dws_le_to_cpu(wrb, sizeof(*wrb));
622
623 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000624 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000625 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000626 dma_unmap_single(dev, dma, wrb->frag_len,
627 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000628 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 }
631}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
/* Map the skb's linear head and page frags for DMA and fill one TX WRB per
 * mapped segment (plus an optional dummy WRB) into @txq.  The first queue
 * slot is reserved for the header WRB, which is filled last, once the total
 * payload length is known.
 *
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure, in which case the queue head is rewound and all mappings made so
 * far are undone.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot; remember where the payload WRBs start
	 * so the error path can rewind to it.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	/* Linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* only the head uses dma_map_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length WRB used as padding when the caller requests it */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind the queue head and unmap everything mapped so far.  Only
	 * the first WRB may have been a single mapping; clear map_single
	 * after the first iteration so the rest unmap as pages.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
698
/* ndo_start_xmit handler: build TX WRBs for @skb on its mapped TX queue,
 * ring the doorbell, and account stats.  Always returns NETDEV_TX_OK; on
 * mapping failure the skb is dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* slot of the header WRB; indexes sent_skb_list */
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* Insert the tag into the payload instead of HW offload */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed in make_tx_wrbs(): rewind head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
758
759static int be_change_mtu(struct net_device *netdev, int new_mtu)
760{
761 struct be_adapter *adapter = netdev_priv(netdev);
762 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000763 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
764 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765 dev_info(&adapter->pdev->dev,
766 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000767 BE_MIN_MTU,
768 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 return -EINVAL;
770 }
771 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
772 netdev->mtu, new_mtu);
773 netdev->mtu = new_mtu;
774 return 0;
775}
776
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * When @vf is true, additionally programs the single transparent vlan tag
 * of VF @vf_num on that VF's interface handle.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		/* Program the VF's transparent vlan tag */
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}
	/* NOTE(review): when vf is true and we don't return below, the VF
	 * config status above is overwritten by the PF config status -
	 * confirm a VF-path failure is intentionally ignored here.
	 */

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vids configured: fall back to vlan promiscuous */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
815
Jiri Pirko8e586132011-12-08 19:52:37 -0500816static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817{
818 struct be_adapter *adapter = netdev_priv(netdev);
819
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000820 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000821 if (!be_physfn(adapter))
Jiri Pirko8e586132011-12-08 19:52:37 -0500822 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000823
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700824 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000825 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000826 be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500827
828 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829}
830
Jiri Pirko8e586132011-12-08 19:52:37 -0500831static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832{
833 struct be_adapter *adapter = netdev_priv(netdev);
834
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000835 adapter->vlans_added--;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000836
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000837 if (!be_physfn(adapter))
Jiri Pirko8e586132011-12-08 19:52:37 -0500838 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000839
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000841 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000842 be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500843
844 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700845}
846
Sathya Perlaa54769f2011-10-24 02:45:00 +0000847static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700848{
849 struct be_adapter *adapter = netdev_priv(netdev);
850
851 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000852 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000853 adapter->promiscuous = true;
854 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000856
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300857 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000858 if (adapter->promiscuous) {
859 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000860 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000861
862 if (adapter->vlans_added)
863 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000864 }
865
Sathya Perlae7b909a2009-11-22 22:01:10 +0000866 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000867 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000868 netdev_mc_count(netdev) > BE_MAX_MC) {
869 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000870 goto done;
871 }
872
Sathya Perla5b8821b2011-08-02 19:57:44 +0000873 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000874done:
875 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700876}
877
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Returns -EPERM when SR-IOV is off, -EINVAL for a bad address or VF
 * index, otherwise the firmware command status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Lancer programs the MAC via the mac-list command */
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* BE: delete the old pmac entry, then add the new one.
		 * NOTE(review): the status of be_cmd_pmac_del() is
		 * overwritten by be_cmd_pmac_add() below, so a failed
		 * delete is silently ignored - confirm this is intended.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* Cache the new address only on success */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
908
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000909static int be_get_vf_config(struct net_device *netdev, int vf,
910 struct ifla_vf_info *vi)
911{
912 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000913 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000914
Sathya Perla11ac75e2011-12-13 00:58:50 +0000915 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000916 return -EPERM;
917
Sathya Perla11ac75e2011-12-13 00:58:50 +0000918 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000919 return -EINVAL;
920
921 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000922 vi->tx_rate = vf_cfg->tx_rate;
923 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000924 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000925 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000926
927 return 0;
928}
929
/* ndo_set_vf_vlan handler: set (or clear, when @vlan is 0) the transparent
 * vlan tag of VF @vf and reprogram the vid configuration.
 * NOTE(review): @qos is accepted but never used - confirm that ignoring
 * it (rather than rejecting qos != 0) is intended.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
957
Ajit Khapardee1d18732010-07-23 01:52:13 +0000958static int be_set_vf_tx_rate(struct net_device *netdev,
959 int vf, int rate)
960{
961 struct be_adapter *adapter = netdev_priv(netdev);
962 int status = 0;
963
Sathya Perla11ac75e2011-12-13 00:58:50 +0000964 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +0000965 return -EPERM;
966
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 if (vf >= adapter->num_vfs || rate < 0)
Ajit Khapardee1d18732010-07-23 01:52:13 +0000968 return -EINVAL;
969
970 if (rate > 10000)
971 rate = 10000;
972
Sathya Perla11ac75e2011-12-13 00:58:50 +0000973 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000974 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000975
976 if (status)
977 dev_info(&adapter->pdev->dev,
978 "tx rate %d on VF %d failed\n", rate, vf);
979 return status;
980}
981
/* Adaptive interrupt coalescing: once a second, recompute the RX event
 * queue delay (eqd) from the observed RX packet rate and reprogram it if
 * it changed.  No-op while adaptive coalescing (AIC) is disabled.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Read the 64-bit packet counter consistently (u64_stats retry
	 * loop guards against a concurrent writer on 32-bit hosts).
	 */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Map pps to a delay value, then clamp to the EQ's limits */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;	/* rate too low: turn coalescing off */
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
1025
/* Account one RX completion in the per-rxo stats.  All updates happen
 * inside a u64_stats update section so 64-bit counters read consistently
 * on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1041
Sathya Perla2e588f82011-03-11 02:49:26 +00001042static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001043{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001044 /* L4 checksum is not reliable for non TCP/UDP packets.
1045 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001046 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1047 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001048}
1049
/* Consume the RX page_info at @frag_idx and decrement the rxq's used
 * count.  The DMA mapping spans a "big page" (adapter->big_page_size)
 * that may back more than one frag; it is unmapped only when the frag
 * flagged as last_page_user is consumed.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1071
1072/* Throwaway the data in the Rx completion */
1073static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001074 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001075 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001076{
Sathya Perla3abcded2010-10-03 22:12:27 -07001077 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001079 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001080
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001081 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001082 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001083 put_page(page_info->page);
1084 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001085 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 }
1087}
1088
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the skb's linear area; the
 * remainder of the first frag and all later frags are attached as page
 * frags, with consecutive frags from the same physical page coalesced
 * into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Attach the rest of the first frag as skb frag[0] */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by skb (or released) */

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1165
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX frags, set checksum/protocol/
 * rxhash/vlan metadata, and hand it to the stack.  On skb-allocation
 * failure the completion's frags are discarded and a drop is counted.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Trust the HW checksum only when csum_passed() says it applies */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1198
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX frags directly to the napi GRO skb (coalescing frags from
 * the same physical page into one slot) and pass it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* No GRO skb available: drop the frags of this completion */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j indexes the skb frag slot being filled; it starts at -1 (u16
	 * wraparound) so the first iteration always opens slot 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1254
Sathya Perla2e588f82011-03-11 02:49:26 +00001255static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1256 struct be_eth_rx_compl *compl,
1257 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001258{
Sathya Perla2e588f82011-03-11 02:49:26 +00001259 rxcp->pkt_size =
1260 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1261 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1262 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1263 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001264 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001265 rxcp->ip_csum =
1266 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1267 rxcp->l4_csum =
1268 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1269 rxcp->ipv6 =
1270 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1271 rxcp->rxq_idx =
1272 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1273 rxcp->num_rcvd =
1274 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1275 rxcp->pkt_type =
1276 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001277 rxcp->rss_hash =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001279 if (rxcp->vlanf) {
1280 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001281 compl);
1282 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1283 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001284 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001285 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001286}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001287
Sathya Perla2e588f82011-03-11 02:49:26 +00001288static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1289 struct be_eth_rx_compl *compl,
1290 struct be_rx_compl_info *rxcp)
1291{
1292 rxcp->pkt_size =
1293 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1294 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1295 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1296 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001297 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001298 rxcp->ip_csum =
1299 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1300 rxcp->l4_csum =
1301 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1302 rxcp->ipv6 =
1303 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1304 rxcp->rxq_idx =
1305 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1306 rxcp->num_rcvd =
1307 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1308 rxcp->pkt_type =
1309 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001310 rxcp->rss_hash =
1311 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001312 if (rxcp->vlanf) {
1313 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001314 compl);
1315 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1316 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001317 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001318 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001319}
1320
/* Fetch the next valid Rx completion from rxo's completion queue, or NULL
 * if none is pending. The parsed result is cached in rxo->rxcp (a single
 * slot), so the caller must consume it before the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native and legacy chips use different completion layouts */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer already delivers the tag in cpu order */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan indication when the tag is the port's pvid
		 * and is not a vlan this interface has configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1360
Eric Dumazet1829b082011-03-01 05:48:12 +00001361static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001364
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001366 gfp |= __GFP_COMP;
1367 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001368}
1369
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post up to MAX_RX_POST frags; stop early if the ring slot already
	 * holds a page (ring full) or allocation fails */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Fresh big page: map it once for all its frags */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size chunk from the current
			 * page; each frag holds its own page reference */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Hand the frag's DMA address to hw via the rx descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop exited with a partially-used page: mark its last user */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1432
/* Fetch the next valid Tx completion from @tx_cq, or NULL if none is
 * pending. The valid bit is cleared so the entry is not re-processed when
 * the queue wraps around. */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Consume the entry: reset valid for the next wrap */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1448
/* Unmap and free the skb whose wrbs end at @last_index on txo's tx queue.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can subtract them from txq->used. */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is unmapped only on the first
		 * iteration, and only if the skb has linear data */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1480
/* Fetch the next posted event entry from @eq_obj, or NULL if none.
 * Converts evt to cpu byte order; the caller is expected to clear evt. */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Read the entry contents only after seeing a non-zero evt */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1493
1494static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001495 struct be_eq_obj *eq_obj,
1496 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001497{
1498 struct be_eq_entry *eqe;
1499 u16 num = 0;
1500
1501 while ((eqe = event_get(eq_obj)) != NULL) {
1502 eqe->evt = 0;
1503 num++;
1504 }
1505
1506 /* Deal with any spurious interrupts that come
1507 * without events
1508 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001509 if (!num)
1510 rearm = true;
1511
1512 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001513 if (num)
1514 napi_schedule(&eq_obj->napi);
1515
1516 return num;
1517}
1518
1519/* Just read and notify events without processing them.
1520 * Used at the time of destroying event queues */
1521static void be_eq_clean(struct be_adapter *adapter,
1522 struct be_eq_obj *eq_obj)
1523{
1524 struct be_eq_entry *eqe;
1525 u16 num = 0;
1526
1527 while ((eqe = event_get(eq_obj)) != NULL) {
1528 eqe->evt = 0;
1529 num++;
1530 }
1531
1532 if (num)
1533 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1534}
1535
/* Flush pending Rx completions and release all posted-but-unused rx
 * buffers, leaving the rx queue empty with head/tail reset to 0. */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): loop termination relies on get_rx_page_info()
	 * decrementing rxq->used for each frag — confirm in its definition */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1560
/* Drain Tx completions at teardown: wait up to ~200ms for the pending
 * completions to arrive, then force-free any posted skbs whose
 * completions will never come. */
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch and release the consumed wrbs */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Advance end_idx to the skb's last wrb so that
		 * be_tx_compl_process() unmaps every frag of the skb */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1608
Sathya Perla5fb379e2009-06-18 00:02:59 +00001609static void be_mcc_queues_destroy(struct be_adapter *adapter)
1610{
1611 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001612
Sathya Perla8788fdc2009-07-27 22:52:03 +00001613 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001614 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001615 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001616 be_queue_free(adapter, q);
1617
Sathya Perla8788fdc2009-07-27 22:52:03 +00001618 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001619 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001620 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001621 be_queue_free(adapter, q);
1622}
1623
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: release resources in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1659
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001660static void be_tx_queues_destroy(struct be_adapter *adapter)
1661{
1662 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001663 struct be_tx_obj *txo;
1664 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665
Sathya Perla3c8def92011-06-12 20:01:58 +00001666 for_all_tx_queues(adapter, txo, i) {
1667 q = &txo->q;
1668 if (q->created)
1669 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1670 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001671
Sathya Perla3c8def92011-06-12 20:01:58 +00001672 q = &txo->cq;
1673 if (q->created)
1674 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1675 be_queue_free(adapter, q);
1676 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677
Sathya Perla859b1e42009-08-10 03:43:51 +00001678 /* Clear any residual events */
1679 be_eq_clean(adapter, &adapter->tx_eq);
1680
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681 q = &adapter->tx_eq.q;
1682 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001683 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684 be_queue_free(adapter, q);
1685}
1686
Sathya Perladafc0fe2011-10-24 02:45:02 +00001687static int be_num_txqs_want(struct be_adapter *adapter)
1688{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001689 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001690 lancer_chip(adapter) || !be_physfn(adapter) ||
1691 adapter->generation == BE_GEN2)
1692 return 1;
1693 else
1694 return MAX_TX_QS;
1695}
1696
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock protects the real_num_tx_queues update done
		 * outside the normal device setup path */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	/* Tx EQ uses a fixed eq-delay of 96; adaptive coalescing is off */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Per Tx queue: a compl queue on the shared EQ, plus the wrb queue
	 * memory (hw txq creation is not done here) */
	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	/* Undo all queues created so far */
	be_tx_queues_destroy(adapter);
	return -1;
}
1746
1747static void be_rx_queues_destroy(struct be_adapter *adapter)
1748{
1749 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001750 struct be_rx_obj *rxo;
1751 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752
Sathya Perla3abcded2010-10-03 22:12:27 -07001753 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001754 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001755
Sathya Perla3abcded2010-10-03 22:12:27 -07001756 q = &rxo->cq;
1757 if (q->created)
1758 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1759 be_queue_free(adapter, q);
1760
Sathya Perla3abcded2010-10-03 22:12:27 -07001761 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001762 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001763 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001764 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001765 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001766}
1767
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001768static u32 be_num_rxqs_want(struct be_adapter *adapter)
1769{
Sathya Perlac814fd32011-06-26 20:41:25 +00001770 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla11ac75e2011-12-13 00:58:50 +00001771 !sriov_enabled(adapter) && be_physfn(adapter) &&
1772 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001773 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1774 } else {
1775 dev_warn(&adapter->pdev->dev,
1776 "No support for multiple RX queues\n");
1777 return 1;
1778 }
1779}
1780
/* Allocate and create the per-rxo event and completion queues, and
 * allocate (but do not create) the rx queues themselves. */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* With MSI-X, one vector is reserved for the Tx/MCC EQ */
	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* Rx EQs use adaptive interrupt coalescing */
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	/* Undo everything created so far */
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001839static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001840{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001841 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1842 if (!eqe->evt)
1843 return false;
1844 else
1845 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001846}
1847
/* Legacy (INTx) interrupt handler. Lancer chips are handled by peeking
 * the event queues directly; BE chips read the CEV ISR register to learn
 * which EQs fired. Returns IRQ_NONE when the interrupt was not ours. */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		/* No tx and no rx events: not our interrupt */
		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		/* Each EQ's bit in the ISR is indexed by its eq_idx */
		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1882
1883static irqreturn_t be_msix_rx(int irq, void *dev)
1884{
Sathya Perla3abcded2010-10-03 22:12:27 -07001885 struct be_rx_obj *rxo = dev;
1886 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887
Sathya Perla3c8def92011-06-12 20:01:58 +00001888 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889
1890 return IRQ_HANDLED;
1891}
1892
Sathya Perla5fb379e2009-06-18 00:02:59 +00001893static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001894{
1895 struct be_adapter *adapter = dev;
1896
Sathya Perla3c8def92011-06-12 20:01:58 +00001897 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898
1899 return IRQ_HANDLED;
1900}
1901
Sathya Perla2e588f82011-03-11 02:49:26 +00001902static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903{
Sathya Perla2e588f82011-03-11 02:49:26 +00001904 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905}
1906
/* NAPI poll handler for one RX ring. Consumes up to @budget completions,
 * discarding flush/partial/mis-filtered ones, refills the ring when it
 * drops below the watermark, and re-arms the CQ only when all work was
 * consumed (work_done < budget), per the NAPI contract.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	/* Ack the processed entries without re-arming the CQ yet */
	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
1963
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	/* Drain every TX queue's completion ring to exhaustion */
	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			/* Ack + re-arm the TX CQ for the processed entries */
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	/* Process pending mailbox (MCC) completions on the shared EQ */
	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Re-arm the event queue now that everything is consumed */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
2018
/* Detect and dump an unrecoverable error (UE) on the adapter.
 * Lancer chips report errors through the SLIPORT status/error registers
 * in BAR space; other chips report them through the PCICFG UE status
 * words, where masked bits are ignored. On any error the ue/eeh flags
 * are latched (so this runs at most once) and every set bit is logged.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already latched an error earlier - nothing more to do */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Only unmasked bits represent real errors */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Dump the textual description of every set UE bit */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2082
Sathya Perla8d56ff12009-11-22 22:02:26 +00002083static void be_msix_disable(struct be_adapter *adapter)
2084{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002085 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002086 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002087 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002088 }
2089}
2090
/* Try to enable MSI-x with one vector per desired RX queue plus one for
 * TX/MCC. pci_enable_msix() returns 0 on success or, when it fails, a
 * positive count of vectors that ARE available - in which case we retry
 * with that smaller count (as long as it covers the minimum Rx + Tx).
 * On total failure the function silently leaves MSI-x disabled
 * (num_msix_vec stays 0) and the caller falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the device can grant */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2115
/* Enable SR-IOV on the PF when the num_vfs module parameter asks for
 * VFs. The requested count is clamped to the device's TotalVFs from the
 * SR-IOV capability; per-VF config state is then allocated for exactly
 * the number of VFs that were enabled.
 * Returns 0 on success (including "nothing to do") or -ENOMEM.
 *
 * Fix: the vf_cfg array was previously sized with the raw num_vfs
 * module parameter, over-allocating when num_vfs exceeded the device
 * limit; it is now sized with the clamped adapter->num_vfs, which is
 * also what for_all_vfs() iterates over.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		/* TotalVFs lives in the SR-IOV extended capability */
		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2151
/* Tear down SR-IOV: disable the VFs and drop the per-VF config array.
 * A no-op when SR-IOV was never enabled or CONFIG_PCI_IOV is off.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!sriov_enabled(adapter))
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
#endif
}
2162
/* Return the MSI-x vector assigned to the given event queue; eq_idx
 * indexes into the msix_entries table filled in by be_msix_enable().
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}
2168
/* Compose a descriptive irq name ("<ifname>-<desc>") into the EQ's desc
 * buffer and request the EQ's MSI-x vector with @handler/@context.
 * Returns 0 or the error from request_irq().
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	/* NOTE(review): unbounded sprintf - assumes eq_obj->desc is sized
	 * for IFNAMSIZ plus the longest desc string; confirm the buffer
	 * size in struct be_eq_obj.
	 */
	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
2180
/* Release the MSI-x vector that was requested for this event queue */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2187
2188static int be_msix_register(struct be_adapter *adapter)
2189{
Sathya Perla3abcded2010-10-03 22:12:27 -07002190 struct be_rx_obj *rxo;
2191 int status, i;
2192 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002193
Sathya Perla3abcded2010-10-03 22:12:27 -07002194 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2195 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002196 if (status)
2197 goto err;
2198
Sathya Perla3abcded2010-10-03 22:12:27 -07002199 for_all_rx_queues(adapter, rxo, i) {
2200 sprintf(qname, "rxq%d", i);
2201 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2202 qname, rxo);
2203 if (status)
2204 goto err_msix;
2205 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002206
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002208
Sathya Perla3abcded2010-10-03 22:12:27 -07002209err_msix:
2210 be_free_irq(adapter, &adapter->tx_eq, adapter);
2211
2212 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2213 be_free_irq(adapter, &rxo->rx_eq, rxo);
2214
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002215err:
2216 dev_warn(&adapter->pdev->dev,
2217 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002218 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002219 return status;
2220}
2221
/* Register interrupt handlers: prefer MSI-x; on MSI-x failure a PF
 * falls back to legacy shared INTx (VFs cannot, so they fail hard).
 * Sets isr_registered on success so teardown knows what to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2249
2250static void be_irq_unregister(struct be_adapter *adapter)
2251{
2252 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002253 struct be_rx_obj *rxo;
2254 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255
2256 if (!adapter->isr_registered)
2257 return;
2258
2259 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002260 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002261 free_irq(netdev->irq, adapter);
2262 goto done;
2263 }
2264
2265 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002266 be_free_irq(adapter, &adapter->tx_eq, adapter);
2267
2268 for_all_rx_queues(adapter, rxo, i)
2269 be_free_irq(adapter, &rxo->rx_eq, rxo);
2270
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271done:
2272 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273}
2274
/* Destroy every created RX ring and drain its leftovers. Each ring is
 * first destroyed in firmware, then - after a short DMA grace period -
 * cleaned of posted buffers; finally any residual events on the ring's
 * EQ are flushed.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2299
/* Quiesce the interface. The ordering is deliberate: stop async MCC
 * processing, mask interrupt sources, disable NAPI, wait out in-flight
 * handlers, free the IRQs, drain pending TX completions (so all tx skbs
 * are released) and only then tear down the RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	/* Lancer has no global intr-set; its CQs are un-armed below */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* Leave every CQ un-armed so no further events fire */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* Wait for any handler still running on each vector */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2348
/* Create the RX rings in firmware and, when more than one RX queue
 * exists, program the 128-entry RSS indirection table round-robin with
 * the rss_ids of the RSS rings (ring 0 is the non-RSS default ring).
 * Finally post the initial receive buffers and enable NAPI per ring.
 * Returns 0 or the first firmware-command error.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				adapter->if_handle,
				(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Stride by the number of RSS rings (num_rx_qs - 1) so each
		 * pass of the inner loop fills one group of table slots.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2385
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002386static int be_open(struct net_device *netdev)
2387{
2388 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002389 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002390 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002391 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002392
Sathya Perla482c9e72011-06-29 23:33:17 +00002393 status = be_rx_queues_setup(adapter);
2394 if (status)
2395 goto err;
2396
Sathya Perla5fb379e2009-06-18 00:02:59 +00002397 napi_enable(&tx_eq->napi);
2398
2399 be_irq_register(adapter);
2400
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002401 if (!lancer_chip(adapter))
2402 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002403
2404 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002405 for_all_rx_queues(adapter, rxo, i) {
2406 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2407 be_cq_notify(adapter, rxo->cq.id, true, 0);
2408 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002409 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002410
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002411 /* Now that interrupts are on we can process async mcc */
2412 be_async_mcc_enable(adapter);
2413
Sathya Perla889cd4b2010-05-30 23:33:45 +00002414 return 0;
2415err:
2416 be_close(adapter->netdev);
2417 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002418}
2419
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002420static int be_setup_wol(struct be_adapter *adapter, bool enable)
2421{
2422 struct be_dma_mem cmd;
2423 int status = 0;
2424 u8 mac[ETH_ALEN];
2425
2426 memset(mac, 0, ETH_ALEN);
2427
2428 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002429 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2430 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002431 if (cmd.va == NULL)
2432 return -1;
2433 memset(cmd.va, 0, cmd.size);
2434
2435 if (enable) {
2436 status = pci_write_config_dword(adapter->pdev,
2437 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2438 if (status) {
2439 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002440 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002441 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2442 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002443 return status;
2444 }
2445 status = be_cmd_enable_magic_wol(adapter,
2446 adapter->netdev->dev_addr, &cmd);
2447 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2448 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2449 } else {
2450 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2451 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2452 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2453 }
2454
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002455 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002456 return status;
2457}
2458
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the mac-list cmd; BEx adds a
		 * pmac entry on the VF's interface.
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* NOTE(review): loop continues past a failure and only the
		 * last VF's status is returned - earlier errors are logged
		 * but otherwise lost.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2493
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002494static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002495{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002496 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002497 u32 vf;
2498
Sathya Perla11ac75e2011-12-13 00:58:50 +00002499 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002500 if (lancer_chip(adapter))
2501 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2502 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002503 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2504 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002505
Sathya Perla11ac75e2011-12-13 00:58:50 +00002506 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2507 }
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002508}
2509
/* Release everything be_setup() created. VF resources go first (they
 * hang off the PF), then the PF interface, then the MCC/RX/TX queues,
 * and finally the firmware is told no more commands will be issued.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2525
Sathya Perla30128032011-11-10 19:17:57 +00002526static void be_vf_setup_init(struct be_adapter *adapter)
2527{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002528 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002529 int vf;
2530
Sathya Perla11ac75e2011-12-13 00:58:50 +00002531 for_all_vfs(adapter, vf_cfg, vf) {
2532 vf_cfg->if_handle = -1;
2533 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002534 }
2535}
2536
/* Provision every enabled VF: create an interface per VF (untagged +
 * broadcast + multicast), program the VF MAC addresses, then query the
 * link speed to seed each VF's tx_rate. Returns 0 or the first error;
 * cleanup of partially-created state is left to the caller / be_clear.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		/* vf + 1: firmware domain 0 is the PF itself */
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		/* lnk_speed is presumably in 10 Mbps units - tx_rate is
		 * kept in Mbps; confirm against be_cmd_link_status_query.
		 */
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2570
Sathya Perla30128032011-11-10 19:17:57 +00002571static void be_setup_init(struct be_adapter *adapter)
2572{
2573 adapter->vlan_prio_bmap = 0xff;
2574 adapter->link_speed = -1;
2575 adapter->if_handle = -1;
2576 adapter->be3_native = false;
2577 adapter->promiscuous = false;
2578 adapter->eq_next_idx = 0;
2579}
2580
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002581static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2582{
2583 u32 pmac_id;
2584 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2585 if (status != 0)
2586 goto do_none;
2587 status = be_cmd_mac_addr_query(adapter, mac,
2588 MAC_ADDRESS_TYPE_NETWORK,
2589 false, adapter->if_handle, pmac_id);
2590 if (status != 0)
2591 goto do_none;
2592 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2593 &adapter->pmac_id, 0);
2594do_none:
2595 return status;
2596}
2597
Sathya Perla5fb379e2009-06-18 00:02:59 +00002598static int be_setup(struct be_adapter *adapter)
2599{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002600 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002601 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002602 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002603 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002604 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002605 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002606
Sathya Perla30128032011-11-10 19:17:57 +00002607 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002608
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002609 be_cmd_req_native_mode(adapter);
2610
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002611 status = be_tx_queues_create(adapter);
2612 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002613 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002614
2615 status = be_rx_queues_create(adapter);
2616 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002617 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002618
Sathya Perla5fb379e2009-06-18 00:02:59 +00002619 status = be_mcc_queues_create(adapter);
2620 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002621 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002622
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002623 memset(mac, 0, ETH_ALEN);
2624 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002625 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002626 if (status)
2627 return status;
2628 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2629 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2630
2631 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2632 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2633 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002634 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2635
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002636 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2637 cap_flags |= BE_IF_FLAGS_RSS;
2638 en_flags |= BE_IF_FLAGS_RSS;
2639 }
2640 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2641 netdev->dev_addr, &adapter->if_handle,
2642 &adapter->pmac_id, 0);
2643 if (status != 0)
2644 goto err;
2645
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002646 for_all_tx_queues(adapter, txo, i) {
2647 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2648 if (status)
2649 goto err;
2650 }
2651
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002652 /* The VF's permanent mac queried from card is incorrect.
2653 * For BEx: Query the mac configued by the PF using if_handle
2654 * For Lancer: Get and use mac_list to obtain mac address.
2655 */
2656 if (!be_physfn(adapter)) {
2657 if (lancer_chip(adapter))
2658 status = be_configure_mac_from_list(adapter, mac);
2659 else
2660 status = be_cmd_mac_addr_query(adapter, mac,
2661 MAC_ADDRESS_TYPE_NETWORK, false,
2662 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002663 if (!status) {
2664 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2665 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2666 }
2667 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002668
Sathya Perla04b71172011-09-27 13:30:27 -04002669 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002670
Sathya Perlaa54769f2011-10-24 02:45:00 +00002671 status = be_vid_config(adapter, false, 0);
2672 if (status)
2673 goto err;
2674
2675 be_set_rx_mode(adapter->netdev);
2676
2677 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002678 /* For Lancer: It is legal for this cmd to fail on VF */
2679 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002680 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002681
Sathya Perlaa54769f2011-10-24 02:45:00 +00002682 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2683 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2684 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002685 /* For Lancer: It is legal for this cmd to fail on VF */
2686 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002687 goto err;
2688 }
2689
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002690 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002691
Sathya Perla11ac75e2011-12-13 00:58:50 +00002692 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002693 status = be_vf_setup(adapter);
2694 if (status)
2695 goto err;
2696 }
2697
2698 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002699err:
2700 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002701 return status;
2702}
2703
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point (netconsole etc.): service pending events on the
 * TX event queue and on every RX event queue.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* NOTE(review): the rearm flag differs for tx (false) vs rx (true);
	 * presumably intentional — confirm against event_handle()'s contract.
	 */
	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rxo, i)
		event_handle(adapter, &rxo->rx_eq, true);
}
#endif
2716
Ajit Khaparde84517482009-09-04 03:12:16 +00002717#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002718static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002719 const u8 *p, u32 img_start, int image_size,
2720 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002721{
2722 u32 crc_offset;
2723 u8 flashed_crc[4];
2724 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002725
2726 crc_offset = hdr_size + img_start + image_size - 4;
2727
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002728 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002729
2730 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002731 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002732 if (status) {
2733 dev_err(&adapter->pdev->dev,
2734 "could not get crc from flash, not flashing redboot\n");
2735 return false;
2736 }
2737
2738 /*update redboot only if crc does not match*/
2739 if (!memcmp(flashed_crc, p, 4))
2740 return false;
2741 else
2742 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002743}
2744
Sathya Perla306f1342011-08-02 19:57:45 +00002745static bool phy_flashing_required(struct be_adapter *adapter)
2746{
2747 int status = 0;
2748 struct be_phy_info phy_info;
2749
2750 status = be_cmd_get_phy_info(adapter, &phy_info);
2751 if (status)
2752 return false;
2753 if ((phy_info.phy_type == TN_8022) &&
2754 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2755 return true;
2756 }
2757 return false;
2758}
2759
/* Flash every applicable firmware component of a UFI image to the NIC.
 *
 * The per-generation tables map each component type (optype) to its byte
 * offset inside the UFI payload and its maximum size. Each component is
 * written in 32KB chunks through be_cmd_write_flashrom(): intermediate
 * chunks use a *_SAVE op and the final chunk a *_FLASH op (PHY fw has its
 * own op pair).
 *
 * Skipped components: NCSI fw when the running fw is older than
 * 3.102.148.0, PHY fw when phy_flashing_required() says no, and redboot
 * when its on-flash CRC already matches the image.
 *
 * Returns 0 on success, -1 on a bounds violation or write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* { offset within UFI payload, component type, max size } */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI fw requires running firmware >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* skip redboot if the CRC already programmed matches */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* component data follows the file header and image headers */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			/* write in chunks of at most 32KB */
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* last chunk commits with a FLASH op; earlier chunks
			 * use a SAVE op */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* a PHY that rejects the op is tolerated;
				 * move on to the next component */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2876
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002877static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2878{
2879 if (fhdr == NULL)
2880 return 0;
2881 if (fhdr->build[0] == '3')
2882 return BE_GEN3;
2883 else if (fhdr->build[0] == '2')
2884 return BE_GEN2;
2885 else
2886 return 0;
2887}
2888
/* Download a firmware image to a Lancer chip: the image is streamed to the
 * "/prg" object in 32KB chunks via lancer_cmd_write_object(), then
 * committed with a final zero-length write. The image length must be
 * 4-byte aligned. Returns 0 on success or a negative/FW error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the write_object request plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what FW actually consumed, which may be less
		 * than chunk_size */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset finalizes the download */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2967
/* Flash a UFI firmware file on BE2/BE3 chips. Validates that the UFI's
 * target generation (from its header) matches the adapter's generation,
 * then hands each matching image off to be_flash_data(). For gen3 files
 * only the image with imageid == 1 is flashed. Returns 0 on success,
 * -ENOMEM on allocation failure, -1 on an incompatible UFI, or the
 * be_flash_data() error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* the g2 header prefix is common to both generations; enough to
	 * read the build string in get_ufigen_type() */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* scratch DMA buffer: flashrom request + one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3023
3024int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3025{
3026 const struct firmware *fw;
3027 int status;
3028
3029 if (!netif_running(adapter->netdev)) {
3030 dev_err(&adapter->pdev->dev,
3031 "Firmware load not allowed (interface is down)\n");
3032 return -1;
3033 }
3034
3035 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3036 if (status)
3037 goto fw_exit;
3038
3039 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3040
3041 if (lancer_chip(adapter))
3042 status = lancer_fw_download(adapter, fw);
3043 else
3044 status = be_fw_download(adapter, fw);
3045
Ajit Khaparde84517482009-09-04 03:12:16 +00003046fw_exit:
3047 release_firmware(fw);
3048 return status;
3049}
3050
/* net_device callbacks implemented by this driver. The ndo_set_vf_*
 * entries manage SR-IOV virtual functions from the PF.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3070
/* Initialize netdev feature flags, ops and NAPI contexts for this
 * adapter. Called once before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* user-toggleable offloads: SG, TSO, checksums, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX flow hashing is useful only with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* currently-enabled set; VLAN rx strip/filter are always on and
	 * not user-toggleable (not in hw_features) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per RX event queue, plus one shared by TX/MCC */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
3104
3105static void be_unmap_pci_bars(struct be_adapter *adapter)
3106{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003107 if (adapter->csr)
3108 iounmap(adapter->csr);
3109 if (adapter->db)
3110 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003111}
3112
3113static int be_map_pci_bars(struct be_adapter *adapter)
3114{
3115 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003116 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003117
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003118 if (lancer_chip(adapter)) {
3119 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3120 pci_resource_len(adapter->pdev, 0));
3121 if (addr == NULL)
3122 return -ENOMEM;
3123 adapter->db = addr;
3124 return 0;
3125 }
3126
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003127 if (be_physfn(adapter)) {
3128 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3129 pci_resource_len(adapter->pdev, 2));
3130 if (addr == NULL)
3131 return -ENOMEM;
3132 adapter->csr = addr;
3133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003134
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003135 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003136 db_reg = 4;
3137 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003138 if (be_physfn(adapter))
3139 db_reg = 4;
3140 else
3141 db_reg = 0;
3142 }
3143 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3144 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003145 if (addr == NULL)
3146 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003147 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003148
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003149 return 0;
3150pci_map_err:
3151 be_unmap_pci_bars(adapter);
3152 return -ENOMEM;
3153}
3154
3155
3156static void be_ctrl_cleanup(struct be_adapter *adapter)
3157{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003158 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003159
3160 be_unmap_pci_bars(adapter);
3161
3162 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003163 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3164 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003165
Sathya Perla5b8821b2011-08-02 19:57:44 +00003166 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003167 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003168 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3169 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003170}
3171
/* Set up the control path: map PCI BARs, allocate the mailbox and
 * rx-filter DMA buffers, and initialize the locks/completion used by the
 * command interface. Returns 0 or -ENOMEM/-mapping error; on failure
 * everything acquired so far is released (goto-cleanup chain).
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned alias into mbox_mem_alloced; only the
	 * latter is ever freed */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved config space is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3224
3225static void be_stats_cleanup(struct be_adapter *adapter)
3226{
Sathya Perla3abcded2010-10-03 22:12:27 -07003227 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003228
3229 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003230 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3231 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003232}
3233
3234static int be_stats_init(struct be_adapter *adapter)
3235{
Sathya Perla3abcded2010-10-03 22:12:27 -07003236 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003237
Selvin Xavier005d5692011-05-16 07:36:35 +00003238 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003239 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003240 } else {
3241 if (lancer_chip(adapter))
3242 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3243 else
3244 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3245 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003246 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3247 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003248 if (cmd->va == NULL)
3249 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003250 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003251 return 0;
3252}
3253
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe. The worker is cancelled first so it cannot race with the
 * teardown; the netdev is freed last since 'adapter' lives inside it.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3281
Sathya Perla2243e2e2009-11-22 22:02:03 +00003282static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003283{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003284 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003285
Sathya Perla3abcded2010-10-03 22:12:27 -07003286 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3287 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003288 if (status)
3289 return status;
3290
Sathya Perla752961a2011-10-24 02:45:03 +00003291 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003292 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3293 else
3294 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3295
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003296 status = be_cmd_get_cntl_attributes(adapter);
3297 if (status)
3298 return status;
3299
Sathya Perla2243e2e2009-11-22 22:02:03 +00003300 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003301}
3302
/* Determine the ASIC generation from the PCI device id. For
 * OC_DEVICE_ID3/4 the SLI_INTF register is read to validate the interface
 * type and extract the SLI family; an invalid register value fails the
 * probe with -EINVAL. An unrecognized device id leaves generation = 0.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* only interface type 2 with a valid signature is supported */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3337
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003338static int lancer_wait_ready(struct be_adapter *adapter)
3339{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003340#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003341 u32 sliport_status;
3342 int status = 0, i;
3343
3344 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3345 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3346 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3347 break;
3348
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003349 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003350 }
3351
3352 if (i == SLIPORT_READY_TIMEOUT)
3353 status = -1;
3354
3355 return status;
3356}
3357
/* Wait for the Lancer SLIPORT to become ready. If the port reports an
 * error that firmware flags as recoverable (both ERR and RN bits set),
 * initiate a port reset via the IP bit of SLIPORT_CONTROL and wait for
 * the error to clear. Returns 0 on success, -1 on timeout or an
 * unrecoverable error.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without a reset request (or vice versa)
			 * cannot be recovered here */
			status = -1;
		}
	}
	return status;
}
3385
/* Called from the worker thread: if the Lancer SLIPORT reports an error,
 * attempt full recovery — reset the port, tear down the adapter config
 * (be_clear) and rebuild it (be_setup), re-opening the interface if it
 * was running. Skipped while EEH or a UE is already being handled.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		/* keep the stack away while the device is rebuilt */
		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* the previous fw timeout no longer applies after reset */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3434
/* Periodic (1 second) housekeeping task for the adapter.
 * Runs error detection/recovery, reaps MCC completions while the
 * interface is down, fires the async stats command and replenishes
 * starved RX queues, then re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Lancer reports function-level errors via SLIPORT registers;
	 * attempt recovery from here */
	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			/* re-arm the MCC CQ with the number of compls reaped */
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* fire a new stats command only once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* presumably adaptive EQ-delay tuning per RX queue --
		 * see be_rx_eqd_update for the actual policy */
		be_rx_eqd_update(adapter, rxo);

		/* re-post buffers to a queue that ran out of them */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3483
/* PCI probe: bring up one BE/Lancer NIC function.
 * Allocates the netdev, initializes control structures, syncs with
 * firmware, sets up queues and registers the net device. On failure,
 * the goto ladder at the bottom unwinds exactly what was set up so far
 * (labels are in reverse order of acquisition).
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter state lives in the netdev's private area */
	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* determines chip generation / Lancer vs BE */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: wait for SLIPORT ready and clear any pre-existing
	 * error state before talking to firmware */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	/* default: flow control enabled in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	/* start the housekeeping worker (be_worker) shortly after probe */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3618
/* PM suspend callback: quiesce the adapter, optionally arm wake-on-LAN,
 * tear down HW resources and put the device into the requested
 * low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* stop the worker before tearing down state it uses */
	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3642
/* PM resume callback: re-enable the PCI device, re-initialize firmware
 * state and bring the interface back up. Mirrors be_suspend().
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is ignored here, unlike
	 * in be_probe(); presumably best-effort on resume -- confirm */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	/* restart the housekeeping worker stopped in be_suspend() */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3678
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset stops any in-flight DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3700
/* EEH error-detected callback: the PCI channel is broken. Flag the
 * error, quiesce the interface, and tell the EEH core whether a slot
 * reset may recover the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* checked by other paths (e.g. lancer recovery) so they stop
	 * touching the dead hardware */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point requesting a slot reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3727
/* EEH slot-reset callback: re-enable the device after the PCI slot has
 * been reset and verify the card's firmware is ready again via POST.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* the reset clears all previously latched error conditions */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3753
/* EEH resume callback: traffic may flow again; re-initialize firmware
 * state and bring the interface back up.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3783
/* PCI error-recovery (EEH) callbacks, wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3789
/* PCI driver glue: probe/remove, power management, shutdown and EEH */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3800
3801static int __init be_init_module(void)
3802{
Joe Perches8e95a202009-12-03 07:58:21 +00003803 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3804 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003805 printk(KERN_WARNING DRV_NAME
3806 " : Module param rx_frag_size must be 2048/4096/8192."
3807 " Using 2048\n");
3808 rx_frag_size = 2048;
3809 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003810
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003811 return pci_register_driver(&be_driver);
3812}
3813module_init(be_init_module);
3814
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);