blob: a0b4be51f0d1d09781a216d0ffe13ecc57805129 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Hardware-block name for each bit of the UE (Unrecoverable Error) status
 * low register: entry i describes bit i. Trailing spaces in some names are
 * preserved as-is since these strings are emitted verbatim in log messages.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Hardware-block name for each bit of the UE status high register:
 * entry i describes bit i; unassigned bits are labelled "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000253 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000254 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
Sathya Perlaa65027e2009-08-17 00:58:04 +0000287 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000288 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700289
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0-layout FW statistics (BE2) into the driver's generic
 * drv_stats structure after byte-swapping the response to CPU order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats in LE; convert the whole block in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; sum them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are kept per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy the v1-layout FW statistics (BE3/Skyhawk) into the driver's generic
 * drv_stats structure after byte-swapping the response to CPU order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns stats in LE; convert the whole block in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
423
/* Copy the Lancer per-port (pport) FW statistics into the driver's generic
 * drv_stats structure after byte-swapping the response to CPU order.
 * Only the low 32 bits of the 64-bit HW counters (the *_lo fields) are
 * consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW returns stats in LE; convert the whole block in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE: the same rx_fifo_overflow counter feeds both of the
	 * fifo-overflow drv counters on Lancer
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address and vlan filtering separately; sum them */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe32012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe32012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the FW-derived
 * drv_stats into @stats. Per-queue 64-bit counters are read under the
 * u64_stats seqcount retry loop so a concurrent writer can't be torn.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
Sathya Perla3c8def92011-06-12 20:01:58 +0000594static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596{
Sathya Perla3c8def92011-06-12 20:01:58 +0000597 struct be_tx_stats *stats = tx_stats(txo);
598
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000605 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
/* Build the header WRB that precedes an skb's data WRBs: sets offload flags
 * (LSO/checksum), VLAN tag insertion, total WRB count and frame length via
 * the AMAP bit-field accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to HW for segmentation */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* IPv6 LSO needs an extra flag, except on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request L4 checksum offload for TCP or UDP */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		/* have HW insert the (possibly priority-remapped) VLAN tag */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
/* Map @skb for DMA and write one WRB per fragment into @txq, preceded
 * by a header WRB and optionally followed by a dummy WRB (@dummy_wrb)
 * that keeps the WRB count even on chips that require it.
 *
 * Returns the number of payload bytes queued, or 0 if a DMA mapping
 * failed — in which case all mappings made so far are undone and the
 * queue head is rewound, leaving the queue state untouched.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* remember where the data WRBs begin, for unwind on DMA error */
	map_head = txq->head;

	/* linear (head) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind the queue head and unmap everything mapped */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first WRB can be a single (non-page) mapping */
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert the VLAN tag (and an outer QnQ tag, if configured) directly
 * into the packet data instead of relying on HW tagging.
 *
 * The inner tag comes from the skb if present, else from the port PVID
 * when a QnQ async event was received.  After a tag is inlined,
 * *skip_hw_vlan (when non-NULL) is set so the caller disables HW VLAN
 * insertion.  May return NULL if skb_share_check()/__vlan_put_tag()
 * fail; the skb is consumed in that case.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
		vlan_tag = adapter->pvid;

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the packet data, not in the skb meta */
		skb->vlan_tci = 0;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
809
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000810static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811{
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830}
831
832static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833{
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835}
836
837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
838{
839 return BE3_chip(adapter) &&
840 be_ipv6_exthdr_check(skb);
841}
842
/* ndo_start_xmit handler.  Besides queuing the skb, this applies
 * several HW workarounds:
 *  - trims padded short VLAN IPv4 packets (HW mangles tot_len when it
 *    inserts the VLAN tag),
 *  - manually inlines the VLAN tag for non-checksum-offloaded packets
 *    (HW would wrongly compute a checksum for them),
 *  - drops, or software-tags, certain IPv6 packets that can lock up
 *    the BE3 ASIC when HW VLAN tagging is used.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;
	bool skip_hw_vlan = false;
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 */
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
939
940static int be_change_mtu(struct net_device *netdev, int new_mtu)
941{
942 struct be_adapter *adapter = netdev_priv(netdev);
943 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000944 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
945 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700946 dev_info(&adapter->pdev->dev,
947 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000948 BE_MIN_MTU,
949 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700950 return -EINVAL;
951 }
952 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
953 netdev->mtu, new_mtu);
954 netdev->mtu = new_mtu;
955 return 0;
956}
957
958/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000959 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
960 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700961 */
Sathya Perla10329df2012-06-05 19:37:18 +0000962static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700963{
Sathya Perla10329df2012-06-05 19:37:18 +0000964 u16 vids[BE_NUM_VLANS_SUPPORTED];
965 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000966 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000967
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000968 /* No need to further configure vids if in promiscuous mode */
969 if (adapter->promiscuous)
970 return 0;
971
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000972 if (adapter->vlans_added > adapter->max_vlans)
973 goto set_vlan_promisc;
974
975 /* Construct VLAN Table to give to HW */
976 for (i = 0; i < VLAN_N_VID; i++)
977 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000978 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000979
980 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000981 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000982
983 /* Set to VLAN promisc mode as setting VLAN filter failed */
984 if (status) {
985 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
986 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
987 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700988 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989
Sathya Perlab31c50a2009-09-17 10:30:13 -0700990 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000991
992set_vlan_promisc:
993 status = be_cmd_vlan_config(adapter, adapter->if_handle,
994 NULL, 0, 1, 1);
995 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700996}
997
Patrick McHardy80d5c362013-04-19 02:04:28 +0000998static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700999{
1000 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001001 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001002
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001003 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001004 status = -EINVAL;
1005 goto ret;
1006 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001007
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001008 /* Packets with VID 0 are always received by Lancer by default */
1009 if (lancer_chip(adapter) && vid == 0)
1010 goto ret;
1011
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001012 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001013 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001014 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001015
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001016 if (!status)
1017 adapter->vlans_added++;
1018 else
1019 adapter->vlan_tag[vid] = 0;
1020ret:
1021 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001022}
1023
Patrick McHardy80d5c362013-04-19 02:04:28 +00001024static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001025{
1026 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001027 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001028
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001029 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001030 status = -EINVAL;
1031 goto ret;
1032 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001033
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001034 /* Packets with VID 0 are always received by Lancer by default */
1035 if (lancer_chip(adapter) && vid == 0)
1036 goto ret;
1037
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001039 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +00001040 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001041
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001042 if (!status)
1043 adapter->vlans_added--;
1044 else
1045 adapter->vlan_tag[vid] = 1;
1046ret:
1047 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048}
1049
/* ndo_set_rx_mode handler: sync promiscuous, multicast and unicast
 * filter state from the net_device flags/lists to the HW via the
 * RX-filter and pmac FW commands.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters dropped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* unicast list changed: rebuild the secondary pmac entries */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all previously-programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many unicast addresses: fall back to promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1111
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On Lancer the MAC is set via the mac-list FW interface (after
 * deleting the currently-active entry, if any); on BE chips the old
 * pmac is deleted and the new one added.  The driver's cached copy in
 * vf_cfg is updated only on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status below is immediately
		 * overwritten by pmac_add, so a failed delete is silently
		 * ignored — presumably intentional (there may be nothing
		 * to delete); confirm.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1151
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001152static int be_get_vf_config(struct net_device *netdev, int vf,
1153 struct ifla_vf_info *vi)
1154{
1155 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001156 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001157
Sathya Perla11ac75e2011-12-13 00:58:50 +00001158 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001159 return -EPERM;
1160
Sathya Perla11ac75e2011-12-13 00:58:50 +00001161 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001162 return -EINVAL;
1163
1164 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001165 vi->tx_rate = vf_cfg->tx_rate;
1166 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001167 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001168 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001169
1170 return 0;
1171}
1172
/* ndo_set_vf_vlan handler: enable transparent VLAN tagging for VF @vf
 * (or reset it to the default VID when @vlan is 0) via the host
 * switch configuration.
 * NOTE(review): the @qos argument is accepted but never used or
 * validated here — confirm whether priority support is intended.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1207
Ajit Khapardee1d18732010-07-23 01:52:13 +00001208static int be_set_vf_tx_rate(struct net_device *netdev,
1209 int vf, int rate)
1210{
1211 struct be_adapter *adapter = netdev_priv(netdev);
1212 int status = 0;
1213
Sathya Perla11ac75e2011-12-13 00:58:50 +00001214 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001215 return -EPERM;
1216
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001217 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001218 return -EINVAL;
1219
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001220 if (rate < 100 || rate > 10000) {
1221 dev_err(&adapter->pdev->dev,
1222 "tx rate must be between 100 and 10000 Mbps\n");
1223 return -EINVAL;
1224 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001225
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001226 if (lancer_chip(adapter))
1227 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1228 else
1229 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001230
1231 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001232 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001233 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001234 else
1235 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001236 return status;
1237}
1238
/* Count this adapter's VFs by walking the PCI device list.
 * @vf_state selects what is counted: ASSIGNED returns only VFs that
 * are currently assigned to a guest (PCI_DEV_FLAGS_ASSIGNED); any
 * other value returns the total number of VFs found.
 * Returns 0 if the device has no SR-IOV extended capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read here but never used below;
	 * VFs are matched via pci_physfn() instead — confirm these reads
	 * can be removed.
	 */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() releases the previous ref and takes a new one,
	 * so this loop does not leak device references.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1262
/* Adaptive interrupt coalescing: recompute the event-queue delay for
 * @eqo from the RX packet rate observed over the last interval and
 * push the new value to HW if it changed.  When adaptive coalescing is
 * disabled, the statically configured eqo->eqd is applied instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* snapshot the 64-bit packet counter consistently (32-bit safe) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* scale pkts/sec to an EQ delay, clamped to [min_eqd, max_eqd];
	 * very low rates get no delay at all
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* only issue the FW command when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1311
Sathya Perla3abcded2010-10-03 22:12:27 -07001312static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001313 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001314{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001315 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001316
Sathya Perlaab1594e2011-07-25 19:10:15 +00001317 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001318 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001319 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001320 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001321 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001322 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001323 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001324 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001325 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326}
1327
Sathya Perla2e588f82011-03-11 02:49:26 +00001328static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001329{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001330 /* L4 checksum is not reliable for non TCP/UDP packets.
1331 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001332 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1333 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001334}
1335
/* Claim the page-info slot at @frag_idx from the Rx queue's table.
 * Unmaps the backing DMA page when this fragment is the last user of it,
 * and decrements the count of posted-but-unconsumed Rx buffers.
 * Caller owns page_info->page (must put_page() or hand it to an skb).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A completion must only reference slots that were posted */
	BUG_ON(!rx_page_info->page);

	/* Only the fragment marked last_page_user unmaps the big page;
	 * earlier fragments of the same page share its DMA mapping.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1356
1357/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001358static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360{
Sathya Perla3abcded2010-10-03 22:12:27 -07001361 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001363 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001365 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001367 put_page(page_info->page);
1368 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001369 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370 }
1371}
1372
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied entirely into the skb linear area
 * (tiny packets) or split: ETH_HLEN bytes are copied and the rest is
 * attached as a page fragment. Remaining fragments are attached as page
 * frags, coalescing consecutive frags that live in the same big page
 * into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the payload stays in the page as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra page
			 * reference taken at post time; frag j keeps one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1449
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the posted Rx buffers, sets checksum,
 * rx-queue and hash metadata, and hands it to the stack. On skb allocation
 * failure the completion's buffers are discarded and a drop is counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		/* Buffers must still be reclaimed even though the pkt is lost */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only when it passed (see csum_passed) */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1483
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb via napi_get_frags() — no data is copied to the
 * linear area — coalescing same-page fragments into one frag slot, then
 * feeds it to GRO. GRO is only used when HW verified the checksum, hence
 * ip_summed is set unconditionally to CHECKSUM_UNNECESSARY here.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: reclaim the posted buffers and drop */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 and is bumped on the first iteration */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1539
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001540static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1541 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542{
Sathya Perla2e588f82011-03-11 02:49:26 +00001543 rxcp->pkt_size =
1544 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1545 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1546 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1547 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001548 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001549 rxcp->ip_csum =
1550 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1551 rxcp->l4_csum =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1553 rxcp->ipv6 =
1554 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1555 rxcp->rxq_idx =
1556 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1557 rxcp->num_rcvd =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1559 rxcp->pkt_type =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001561 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001563 if (rxcp->vlanf) {
1564 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001565 compl);
1566 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1567 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001568 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001569 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001570}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001572static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1573 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001574{
1575 rxcp->pkt_size =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1577 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1578 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1579 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001580 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001581 rxcp->ip_csum =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1583 rxcp->l4_csum =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1585 rxcp->ipv6 =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1587 rxcp->rxq_idx =
1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1589 rxcp->num_rcvd =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1591 rxcp->pkt_type =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001593 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001595 if (rxcp->vlanf) {
1596 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001597 compl);
1598 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1599 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001600 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001601 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001602 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1603 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001604}
1605
/* Fetch and parse the next valid Rx completion from the CQ tail.
 * Returns NULL when no valid entry is present. On success the entry's
 * valid bit is cleared (so it is consumed exactly once) and the CQ tail
 * is advanced. The returned rxcp is the per-rxo scratch struct, valid
 * only until the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure the rest of the entry is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native adapters produce v1 completions, others v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag unless the vlan was explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1648
Eric Dumazet1829b082011-03-01 05:48:12 +00001649static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001650{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001652
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001653 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001654 gfp |= __GFP_COMP;
1655 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001656}
1657
1658/*
1659 * Allocate a page, split it to fragments of size rx_frag_size and post as
1660 * receive buffers to BE
1661 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001662static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001663{
Sathya Perla3abcded2010-10-03 22:12:27 -07001664 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001665 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001666 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001667 struct page *pagep = NULL;
1668 struct be_eth_rx_d *rxd;
1669 u64 page_dmaaddr = 0, frag_dmaaddr;
1670 u32 posted, page_offset = 0;
1671
Sathya Perla3abcded2010-10-03 22:12:27 -07001672 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001673 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1674 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001675 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001676 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001677 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678 break;
1679 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001680 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1681 0, adapter->big_page_size,
1682 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683 page_info->page_offset = 0;
1684 } else {
1685 get_page(pagep);
1686 page_info->page_offset = page_offset + rx_frag_size;
1687 }
1688 page_offset = page_info->page_offset;
1689 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001690 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001691 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1692
1693 rxd = queue_head_node(rxq);
1694 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1695 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001696
1697 /* Any space left in the current big page for another frag? */
1698 if ((page_offset + rx_frag_size + rx_frag_size) >
1699 adapter->big_page_size) {
1700 pagep = NULL;
1701 page_info->last_page_user = true;
1702 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001703
1704 prev_page_info = page_info;
1705 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001706 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001707 }
1708 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001709 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001710
1711 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001712 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001713 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001714 } else if (atomic_read(&rxq->used) == 0) {
1715 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001716 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001717 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718}
1719
/* Fetch the next valid Tx completion from @tx_cq, or NULL if none.
 * Clears the entry's valid bit (consume-once) and advances the CQ tail.
 * The returned pointer aliases the CQ ring entry: use it before the ring
 * wraps back to this slot.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the entry body only after the valid bit is observed set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1735
/* Reclaim one completed Tx packet: walk the WRBs from the queue tail up to
 * and including @last_index, unmapping each data fragment, then free the
 * skb. Returns the number of WRBs consumed (including the header WRB) so
 * the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is recorded at the slot of its header WRB (current tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data WRB may map the skb header (linear part);
		 * only unmap it once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1767
/* Return the number of events in the event queue.
 * Consumes each valid entry (evt != 0), zeroing it for reuse and advancing
 * the EQ tail, until a not-yet-valid entry is reached.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read/overwrite the entry only after evt is seen non-zero */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1787
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001788/* Leaves the EQ is disarmed state */
1789static void be_eq_clean(struct be_eq_obj *eqo)
1790{
1791 int num = events_get(eqo);
1792
1793 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1794}
1795
/* Drain an Rx CQ during teardown: consume all completions (waiting for the
 * HW flush completion where applicable), leave the CQ disarmed, and free
 * every posted-but-unused Rx buffer so the queue ends up empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Bound the wait at ~10ms, or bail on a HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1846
/* Drain all Tx queues during teardown: poll each TXQ's CQ (up to ~200ms)
 * to reclaim completed packets, then forcibly free any posted packets
 * whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Ack what we consumed; reset accumulators per-txq */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the packet's WRB span from its skb to
			 * find the last WRB index for be_tx_compl_process()
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1905
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001906static void be_evt_queues_destroy(struct be_adapter *adapter)
1907{
1908 struct be_eq_obj *eqo;
1909 int i;
1910
1911 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001912 if (eqo->q.created) {
1913 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001914 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001915 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001916 be_queue_free(adapter, &eqo->q);
1917 }
1918}
1919
1920static int be_evt_queues_create(struct be_adapter *adapter)
1921{
1922 struct be_queue_info *eq;
1923 struct be_eq_obj *eqo;
1924 int i, rc;
1925
1926 adapter->num_evt_qs = num_irqs(adapter);
1927
1928 for_all_evt_queues(adapter, eqo, i) {
1929 eqo->adapter = adapter;
1930 eqo->tx_budget = BE_TX_BUDGET;
1931 eqo->idx = i;
1932 eqo->max_eqd = BE_MAX_EQD;
1933 eqo->enable_aic = true;
1934
1935 eq = &eqo->q;
1936 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1937 sizeof(struct be_eq_entry));
1938 if (rc)
1939 return rc;
1940
1941 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1942 if (rc)
1943 return rc;
1944 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001945 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001946}
1947
Sathya Perla5fb379e2009-06-18 00:02:59 +00001948static void be_mcc_queues_destroy(struct be_adapter *adapter)
1949{
1950 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001951
Sathya Perla8788fdc2009-07-27 22:52:03 +00001952 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001953 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001954 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001955 be_queue_free(adapter, q);
1956
Sathya Perla8788fdc2009-07-27 22:52:03 +00001957 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001958 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001959 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001960 be_queue_free(adapter, q);
1961}
1962
1963/* Must be called only after TX qs are created as MCC shares TX EQ */
1964static int be_mcc_queues_create(struct be_adapter *adapter)
1965{
1966 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001967
Sathya Perla8788fdc2009-07-27 22:52:03 +00001968 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001969 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001970 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001971 goto err;
1972
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001973 /* Use the default EQ for MCC completions */
1974 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001975 goto mcc_cq_free;
1976
Sathya Perla8788fdc2009-07-27 22:52:03 +00001977 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001978 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1979 goto mcc_cq_destroy;
1980
Sathya Perla8788fdc2009-07-27 22:52:03 +00001981 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001982 goto mcc_q_free;
1983
1984 return 0;
1985
1986mcc_q_free:
1987 be_queue_free(adapter, q);
1988mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001989 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001990mcc_cq_free:
1991 be_queue_free(adapter, cq);
1992err:
1993 return -1;
1994}
1995
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996static void be_tx_queues_destroy(struct be_adapter *adapter)
1997{
1998 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001999 struct be_tx_obj *txo;
2000 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002001
Sathya Perla3c8def92011-06-12 20:01:58 +00002002 for_all_tx_queues(adapter, txo, i) {
2003 q = &txo->q;
2004 if (q->created)
2005 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2006 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002007
Sathya Perla3c8def92011-06-12 20:01:58 +00002008 q = &txo->cq;
2009 if (q->created)
2010 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2011 be_queue_free(adapter, q);
2012 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013}
2014
Sathya Perladafc0fe2011-10-24 02:45:02 +00002015static int be_num_txqs_want(struct be_adapter *adapter)
2016{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002017 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2018 be_is_mc(adapter) ||
2019 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002020 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002021 return 1;
2022 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002023 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002024}
2025
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002026static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002027{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002028 struct be_queue_info *cq, *eq;
2029 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00002030 struct be_tx_obj *txo;
2031 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002032
Sathya Perladafc0fe2011-10-24 02:45:02 +00002033 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002034 if (adapter->num_tx_qs != MAX_TX_QS) {
2035 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00002036 netif_set_real_num_tx_queues(adapter->netdev,
2037 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002038 rtnl_unlock();
2039 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00002040
Sathya Perla3c8def92011-06-12 20:01:58 +00002041 for_all_tx_queues(adapter, txo, i) {
2042 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002043 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2044 sizeof(struct be_eth_tx_compl));
2045 if (status)
2046 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002047
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002048 /* If num_evt_qs is less than num_tx_qs, then more than
2049 * one txq share an eq
2050 */
2051 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2052 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2053 if (status)
2054 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00002055 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002056 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002057}
2058
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002059static int be_tx_qs_create(struct be_adapter *adapter)
2060{
2061 struct be_tx_obj *txo;
2062 int i, status;
2063
2064 for_all_tx_queues(adapter, txo, i) {
2065 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2066 sizeof(struct be_eth_wrb));
2067 if (status)
2068 return status;
2069
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002070 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002071 if (status)
2072 return status;
2073 }
2074
Sathya Perlad3791422012-09-28 04:39:44 +00002075 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2076 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002077 return 0;
2078}
2079
2080static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002081{
2082 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002083 struct be_rx_obj *rxo;
2084 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002085
Sathya Perla3abcded2010-10-03 22:12:27 -07002086 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002087 q = &rxo->cq;
2088 if (q->created)
2089 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2090 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002091 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002092}
2093
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002094static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002095{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002096 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002097 struct be_rx_obj *rxo;
2098 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002099
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002100 /* We'll create as many RSS rings as there are irqs.
2101 * But when there's only one irq there's no use creating RSS rings
2102 */
2103 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2104 num_irqs(adapter) + 1 : 1;
Sathya Perla7f640062012-06-05 19:37:20 +00002105 if (adapter->num_rx_qs != MAX_RX_QS) {
2106 rtnl_lock();
2107 netif_set_real_num_rx_queues(adapter->netdev,
2108 adapter->num_rx_qs);
2109 rtnl_unlock();
2110 }
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002111
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002112 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002113 for_all_rx_queues(adapter, rxo, i) {
2114 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002115 cq = &rxo->cq;
2116 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2117 sizeof(struct be_eth_rx_compl));
2118 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002119 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002120
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002121 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2122 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002123 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002124 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002125 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002126
Sathya Perlad3791422012-09-28 04:39:44 +00002127 dev_info(&adapter->pdev->dev,
2128 "created %d RSS queue(s) and 1 default RX queue\n",
2129 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002130 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002131}
2132
/* INTx ISR: registered against the first EQ only (shared line).
 * Schedules NAPI, which does the actual completion processing.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the counted events without re-arming; be_poll() re-arms */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2164
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002165static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002166{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002167 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002168
Sathya Perla0b545a62012-11-23 00:27:18 +00002169 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2170 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002171 return IRQ_HANDLED;
2172}
2173
Sathya Perla2e588f82011-03-11 02:49:26 +00002174static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175{
Somnath Koture38b1702013-05-29 22:55:56 +00002176 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002177}
2178
/* NAPI RX worker: consume up to @budget completions from @rxo's CQ,
 * feeding good pkts to GRO or the regular receive path and discarding
 * the rest. Re-posts RX buffers when the ring runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded/flush compls */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish the RX ring if it has drained below the
		 * refill watermark
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2228
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002229static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2230 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002231{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002233 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002235 for (work_done = 0; work_done < budget; work_done++) {
2236 txcp = be_tx_compl_get(&txo->cq);
2237 if (!txcp)
2238 break;
2239 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002240 AMAP_GET_BITS(struct amap_eth_tx_compl,
2241 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002242 }
2243
2244 if (work_done) {
2245 be_cq_notify(adapter, txo->cq.id, true, work_done);
2246 atomic_sub(num_wrbs, &txo->q.used);
2247
2248 /* As Tx wrbs have been freed up, wake up netdev queue
2249 * if it was stopped due to lack of tx wrbs. */
2250 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2251 atomic_read(&txo->q.used) < txo->q.len / 2) {
2252 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002253 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002254
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002255 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2256 tx_stats(txo)->tx_compl += work_done;
2257 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2258 }
2259 return (work_done < budget); /* Done */
2260}
Sathya Perla3c8def92011-06-12 20:01:58 +00002261
/* NAPI poll handler: services every TX and RX queue mapped to this EQ,
 * processes MCC completions on the MCC EQ, and re-arms the EQ only when
 * all the work fit within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	/* Count the events behind this poll; they are acked below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* TX not drained: force re-poll */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* All work done: ack events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2300
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002301void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002302{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002303 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2304 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002305 u32 i;
2306
Sathya Perlad23e9462012-12-17 19:38:51 +00002307 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002308 return;
2309
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002310 if (lancer_chip(adapter)) {
2311 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2312 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2313 sliport_err1 = ioread32(adapter->db +
2314 SLIPORT_ERROR1_OFFSET);
2315 sliport_err2 = ioread32(adapter->db +
2316 SLIPORT_ERROR2_OFFSET);
2317 }
2318 } else {
2319 pci_read_config_dword(adapter->pdev,
2320 PCICFG_UE_STATUS_LOW, &ue_lo);
2321 pci_read_config_dword(adapter->pdev,
2322 PCICFG_UE_STATUS_HIGH, &ue_hi);
2323 pci_read_config_dword(adapter->pdev,
2324 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2325 pci_read_config_dword(adapter->pdev,
2326 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002327
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002328 ue_lo = (ue_lo & ~ue_lo_mask);
2329 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002330 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002331
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002332 /* On certain platforms BE hardware can indicate spurious UEs.
2333 * Allow the h/w to stop working completely in case of a real UE.
2334 * Hence not setting the hw_error for UE detection.
2335 */
2336 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002337 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002338 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002339 "Error detected in the card\n");
2340 }
2341
2342 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2343 dev_err(&adapter->pdev->dev,
2344 "ERR: sliport status 0x%x\n", sliport_status);
2345 dev_err(&adapter->pdev->dev,
2346 "ERR: sliport error1 0x%x\n", sliport_err1);
2347 dev_err(&adapter->pdev->dev,
2348 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002349 }
2350
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002351 if (ue_lo) {
2352 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2353 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002354 dev_err(&adapter->pdev->dev,
2355 "UE: %s bit set\n", ue_status_low_desc[i]);
2356 }
2357 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002358
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002359 if (ue_hi) {
2360 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2361 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002362 dev_err(&adapter->pdev->dev,
2363 "UE: %s bit set\n", ue_status_hi_desc[i]);
2364 }
2365 }
2366
2367}
2368
Sathya Perla8d56ff12009-11-22 22:02:26 +00002369static void be_msix_disable(struct be_adapter *adapter)
2370{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002371 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002372 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002373 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002374 }
2375}
2376
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002377static uint be_num_rss_want(struct be_adapter *adapter)
2378{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002379 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002380
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002381 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002382 (lancer_chip(adapter) ||
2383 (!sriov_want(adapter) && be_physfn(adapter)))) {
2384 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002385 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2386 }
2387 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002388}
2389
/* Enable MSI-x with enough vectors for the wanted RSS rings (plus RoCE
 * vectors when supported), retrying once with however many vectors the
 * OS says it can grant. Returns 0 on success; on MSI-x failure returns
 * 0 for PFs (INTx fallback is possible) but the error for VFs.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could
		 * be allocated; retry with that smaller count.
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Split the granted vectors between NIC and RoCE usage */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2441
/* Return the MSI-x vector number assigned to this EQ (EQ i uses
 * msix_entries[i]).
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2447
/* request_irq() one MSI-x vector per event queue. On failure, free the
 * irqs already acquired and disable MSI-x altogether.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind in reverse, skipping the EQ whose request_irq failed */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2471
2472static int be_irq_register(struct be_adapter *adapter)
2473{
2474 struct net_device *netdev = adapter->netdev;
2475 int status;
2476
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002477 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002478 status = be_msix_register(adapter);
2479 if (status == 0)
2480 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002481 /* INTx is not supported for VF */
2482 if (!be_physfn(adapter))
2483 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002484 }
2485
Sathya Perlae49cc342012-11-27 19:50:02 +00002486 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002487 netdev->irq = adapter->pdev->irq;
2488 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002489 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002490 if (status) {
2491 dev_err(&adapter->pdev->dev,
2492 "INTx request IRQ failed - err %d\n", status);
2493 return status;
2494 }
2495done:
2496 adapter->isr_registered = true;
2497 return 0;
2498}
2499
2500static void be_irq_unregister(struct be_adapter *adapter)
2501{
2502 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002503 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002504 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002505
2506 if (!adapter->isr_registered)
2507 return;
2508
2509 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002510 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002511 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002512 goto done;
2513 }
2514
2515 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002516 for_all_evt_queues(adapter, eqo, i)
2517 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002518
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002519done:
2520 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002521}
2522
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002523static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002524{
2525 struct be_queue_info *q;
2526 struct be_rx_obj *rxo;
2527 int i;
2528
2529 for_all_rx_queues(adapter, rxo, i) {
2530 q = &rxo->q;
2531 if (q->created) {
2532 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002533 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002534 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002535 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002536 }
2537}
2538
/* ndo_stop: quiesce the interface. Disables NAPI and async MCC, drains
 * pending TX completions, destroys RX queues, synchronizes and drains
 * the EQs, and finally releases the irqs. The order is deliberate:
 * TX is drained before RX teardown and before the irqs are freed.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Wait for any in-flight handler of this EQ's irq before
		 * draining the EQ
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2575
/* Allocate and create all RX rings (default ring + RSS rings), program
 * the 128-entry RSS indirection table when multiple rings exist, and
 * post the initial receive buffers. Returns 0 or the first failure.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Stripe the rss_ids of all RSS rings round-robin across
		 * the 128-entry indirection table
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS flags are set only on non-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2632
/* ndo_open handler: bring the interface up.
 * Creates RX queues, registers IRQs, arms all CQs/EQs, enables NAPI and
 * async MCC events, then starts the TX queues.  The ordering here mirrors
 * (in reverse) the teardown in be_close().
 * Returns 0 on success; on any failure the partial bring-up is undone via
 * be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Re-arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI before un-masking the EQs so events are not lost */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: report current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2675
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002676static int be_setup_wol(struct be_adapter *adapter, bool enable)
2677{
2678 struct be_dma_mem cmd;
2679 int status = 0;
2680 u8 mac[ETH_ALEN];
2681
2682 memset(mac, 0, ETH_ALEN);
2683
2684 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002685 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002686 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002687 if (cmd.va == NULL)
2688 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002689
2690 if (enable) {
2691 status = pci_write_config_dword(adapter->pdev,
2692 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2693 if (status) {
2694 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002695 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002696 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2697 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002698 return status;
2699 }
2700 status = be_cmd_enable_magic_wol(adapter,
2701 adapter->netdev->dev_addr, &cmd);
2702 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2703 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2704 } else {
2705 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2706 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2707 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2708 }
2709
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002710 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002711 return status;
2712}
2713
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002714/*
2715 * Generate a seed MAC address from the PF MAC Address using jhash.
2716 * MAC Address for VFs are assigned incrementally starting from the seed.
2717 * These addresses are programmed in the ASIC by the PF and the VF driver
2718 * queries for the MAC address during its probe.
2719 */
/* Program a MAC address into each VF's interface.
 * A seed MAC is generated from the PF MAC (see be_vf_eth_addr_generate)
 * and the last octet is incremented per VF.  On Lancer the MAC is pushed
 * via the MAC-list command; on BE3 it is added as a pmac entry.
 * A per-VF failure is logged but does not abort the loop; the status of
 * the LAST VF processed is what gets returned.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			/* Cache the address only when FW accepted it */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive MAC.  NOTE(review):
		 * mac[5] wraps silently past 0xff — TODO confirm num_vfs
		 * can never be large enough to collide.
		 */
		mac[5] += 1;
	}
	return status;
}
2748
/* For already-enabled VFs (e.g. after a PF reload), query each VF's
 * current MAC/pmac_id back from the FW and cache them in vf_cfg.
 * Returns 0 on success or the first failing mac_addr_query status.
 * NOTE(review): the be_cmd_get_mac_from_list() return value is not
 * checked here — presumably mac/pmac_id stay usable on failure; verify.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2768
/* Tear down all VF state: disable SR-IOV, delete per-VF MACs and
 * interfaces, and free the vf_cfg array.
 * If any VF is still assigned to a VM we must not disable SR-IOV or
 * destroy its resources — only the host-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo what be_vf_eth_addr_config() programmed */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2795
/* Undo everything be_setup() did: stop the worker, clear VFs, remove
 * extra unicast MACs, destroy the interface and all queues, and release
 * MSI-X vectors.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC (owned by the interface itself);
	 * the extra unicast entries start at index 1, hence i = 1.
	 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete every additional unicast MAC programmed via set_rx_mode */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* Destroy queues in the reverse order of their creation */
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2825
Sathya Perla4c876612013-02-03 20:30:11 +00002826static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002827{
Sathya Perla4c876612013-02-03 20:30:11 +00002828 struct be_vf_cfg *vf_cfg;
2829 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002830 int status;
2831
Sathya Perla4c876612013-02-03 20:30:11 +00002832 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2833 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002834
Sathya Perla4c876612013-02-03 20:30:11 +00002835 for_all_vfs(adapter, vf_cfg, vf) {
2836 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002837 be_cmd_get_profile_config(adapter, &cap_flags,
2838 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002839
2840 /* If a FW profile exists, then cap_flags are updated */
2841 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2842 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2843 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2844 &vf_cfg->if_handle, vf + 1);
2845 if (status)
2846 goto err;
2847 }
2848err:
2849 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002850}
2851
Sathya Perla39f1d942012-05-08 19:41:24 +00002852static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002853{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002854 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002855 int vf;
2856
Sathya Perla39f1d942012-05-08 19:41:24 +00002857 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2858 GFP_KERNEL);
2859 if (!adapter->vf_cfg)
2860 return -ENOMEM;
2861
Sathya Perla11ac75e2011-12-13 00:58:50 +00002862 for_all_vfs(adapter, vf_cfg, vf) {
2863 vf_cfg->if_handle = -1;
2864 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002865 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002866 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002867}
2868
/* Bring up SR-IOV VFs.
 * Two paths exist:
 *  - VFs already enabled (e.g. PF driver reload): adopt them — query their
 *    existing if_handles and MACs instead of creating new ones.
 *  - Fresh setup: cap num_vfs to what the device supports, create per-VF
 *    interfaces and MACs, then enable SR-IOV in the PCI core LAST so VFs
 *    only probe once all their resources exist.
 * Per-VF QoS/link-speed/hsw settings are applied in both paths.
 * On any error all VF state is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		/* Adopt pre-existing VFs; the module param is ignored */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Existing VFs: recover their if_handles from FW */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Best-effort: cache the link speed as the VF's tx_rate */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		/* Enable SR-IOV only after all VF resources are in place */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2952
Sathya Perla30128032011-11-10 19:17:57 +00002953static void be_setup_init(struct be_adapter *adapter)
2954{
2955 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002956 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002957 adapter->if_handle = -1;
2958 adapter->be3_native = false;
2959 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002960 if (be_physfn(adapter))
2961 adapter->cmd_privileges = MAX_PRIVILEGES;
2962 else
2963 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002964}
2965
/* Determine the MAC address this function should use and whether it is
 * already active (programmed) in the FW.
 * @mac:        out — the MAC address found.
 * @active_mac: out — true if the MAC is already programmed and needs no
 *              be_cmd_pmac_add() by the caller (see be_setup()).
 * @pmac_id:    out — pmac id of the active MAC (Lancer path only).
 * Returns 0 on success or a FW command status.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* A non-zero perm_addr means a MAC was already established for
	 * this netdev; just reuse dev_addr.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		/* NOTE(review): assumes get_mac_from_list sets *active_mac
		 * even on failure — confirm against be_cmds.c.
		 */
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
3000
/* Populate the adapter's resource limits (max MACs, VLANs, queues,
 * if_cap_flags, dev_num_vfs).
 * Newer (non-BEx) chips expose a FW function profile which is sanity-
 * clamped here; BEx chips fall back to fixed driver-side defaults,
 * optionally with a BE3-PF TXQ count read from the FW profile.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;
	u16 txq_count = 0;

	if (!BEx_chip(adapter)) {
		/* Skyhawk/Lancer: limits come from the FW func profile */
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Keep one RX queue reserved as the non-RSS default queue */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		/* BEx defaults when no FW profile is available */
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* Flex10 partitions the VLAN space across functions */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read the device's total VF count from the SR-IOV capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
3079
Sathya Perla39f1d942012-05-08 19:41:24 +00003080/* Routine to query per function resource limits */
3081static int be_get_config(struct be_adapter *adapter)
3082{
Sathya Perla4c876612013-02-03 20:30:11 +00003083 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003084
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003085 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3086 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003087 &adapter->function_caps,
3088 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003089 if (status)
3090 goto err;
3091
3092 be_get_resources(adapter);
3093
3094 /* primary mac needs 1 pmac entry */
3095 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3096 sizeof(u32), GFP_KERNEL);
3097 if (!adapter->pmac_id) {
3098 status = -ENOMEM;
3099 goto err;
3100 }
3101
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003102err:
3103 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003104}
3105
/* One-time HW/FW setup for the function: query config, enable MSI-X,
 * create all queues, create the FW interface, establish the MAC address,
 * configure VLANs/RX-mode/flow-control, optionally bring up VFs, and
 * finally schedule the periodic worker.  be_clear() is the inverse and
 * is invoked on any failure here.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Queue creation order: EQs, then TX/RX CQs, then MCC queues */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enabled flags must be a subset of what the function supports */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if FW doesn't already have it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-apply VLAN filters surviving from before a re-setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Push our desired flow-control only if it differs from FW state */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter)) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3218
Ivan Vecera66268732011-12-08 01:31:21 +00003219#ifdef CONFIG_NET_POLL_CONTROLLER
3220static void be_netpoll(struct net_device *netdev)
3221{
3222 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003223 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003224 int i;
3225
Sathya Perlae49cc342012-11-27 19:50:02 +00003226 for_all_evt_queues(adapter, eqo, i) {
3227 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3228 napi_schedule(&eqo->napi);
3229 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003230
3231 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003232}
3233#endif
3234
/* Signature at the start of a UFI firmware file header */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte cookie marking a flash-section-info directory inside a UFI
 * image; matched by get_fsec_info() every 32 bytes.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3237
/* Decide whether the redboot (boot code) region needs reflashing.
 * Compares the CRC stored at the end of the image in the FW file (last
 * 4 bytes) with the CRC read back from the flash part.
 * Returns true if they differ (flash needed); false if they match or the
 * flash CRC could not be read (skip flashing to be safe).
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC occupies the last 4 bytes of the image within the file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3264
Sathya Perla306f1342011-08-02 19:57:45 +00003265static bool phy_flashing_required(struct be_adapter *adapter)
3266{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003267 return (adapter->phy.phy_type == TN_8022 &&
3268 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003269}
3270
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003271static bool is_comp_in_ufi(struct be_adapter *adapter,
3272 struct flash_section_info *fsec, int type)
3273{
3274 int i = 0, img_type = 0;
3275 struct flash_section_info_g2 *fsec_g2 = NULL;
3276
Sathya Perlaca34fe32012-11-06 17:48:56 +00003277 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003278 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3279
3280 for (i = 0; i < MAX_FLASH_COMP; i++) {
3281 if (fsec_g2)
3282 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3283 else
3284 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3285
3286 if (img_type == type)
3287 return true;
3288 }
3289 return false;
3290
3291}
3292
3293struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3294 int header_size,
3295 const struct firmware *fw)
3296{
3297 struct flash_section_info *fsec = NULL;
3298 const u8 *p = fw->data;
3299
3300 p += header_size;
3301 while (p < (fw->data + fw->size)) {
3302 fsec = (struct flash_section_info *)p;
3303 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3304 return fsec;
3305 p += 32;
3306 }
3307 return NULL;
3308}
3309
/* Write one firmware image to flash in 32KB chunks through the FW
 * write-flashrom command.  All chunks but the last use the SAVE
 * operation; the final chunk uses FLASH to commit.  PHY firmware has
 * its own operation codes, and an ILLEGAL_IOCTL_REQ for PHY FW is
 * treated as "not supported" and silently skipped.
 * Returns 0 on success or the failing command status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* FW accepts at most 32KB of payload per command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			/* FW without PHY-flash support rejects the op;
			 * that is not a fatal error.
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3350
/* For BE2, BE3 and BE3-R */
/* Flash all applicable firmware components from a BE2/BE3 UFI image.
 * The gen2/gen3 tables below describe, per component: its flash offset,
 * operation type, maximum size, and the image-type id used to look it up
 * in the UFI's flash section info. Components absent from the UFI, or not
 * applicable to this adapter, are skipped. Returns 0 on success, -1 on a
 * corrupted/oversized image, or the failing flash command status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Component layout for BE3 (gen3) UFIs */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Component layout for BE2 (gen2) UFIs (no NCSI/PHY entries) */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components not present in this UFI */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW flashing requires a minimum running FW version */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Boot code is only flashed when the on-flash copy differs */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds-check the component against the image size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3460
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003461static int be_flash_skyhawk(struct be_adapter *adapter,
3462 const struct firmware *fw,
3463 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003464{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003465 int status = 0, i, filehdr_size = 0;
3466 int img_offset, img_size, img_optype, redboot;
3467 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3468 const u8 *p = fw->data;
3469 struct flash_section_info *fsec = NULL;
3470
3471 filehdr_size = sizeof(struct flash_file_hdr_g3);
3472 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3473 if (!fsec) {
3474 dev_err(&adapter->pdev->dev,
3475 "Invalid Cookie. UFI corrupted ?\n");
3476 return -1;
3477 }
3478
3479 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3480 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3481 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3482
3483 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3484 case IMAGE_FIRMWARE_iSCSI:
3485 img_optype = OPTYPE_ISCSI_ACTIVE;
3486 break;
3487 case IMAGE_BOOT_CODE:
3488 img_optype = OPTYPE_REDBOOT;
3489 break;
3490 case IMAGE_OPTION_ROM_ISCSI:
3491 img_optype = OPTYPE_BIOS;
3492 break;
3493 case IMAGE_OPTION_ROM_PXE:
3494 img_optype = OPTYPE_PXE_BIOS;
3495 break;
3496 case IMAGE_OPTION_ROM_FCoE:
3497 img_optype = OPTYPE_FCOE_BIOS;
3498 break;
3499 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3500 img_optype = OPTYPE_ISCSI_BACKUP;
3501 break;
3502 case IMAGE_NCSI:
3503 img_optype = OPTYPE_NCSI_FW;
3504 break;
3505 default:
3506 continue;
3507 }
3508
3509 if (img_optype == OPTYPE_REDBOOT) {
3510 redboot = be_flash_redboot(adapter, fw->data,
3511 img_offset, img_size,
3512 filehdr_size + img_hdrs_size);
3513 if (!redboot)
3514 continue;
3515 }
3516
3517 p = fw->data;
3518 p += filehdr_size + img_offset + img_hdrs_size;
3519 if (p + img_size > fw->data + fw->size)
3520 return -1;
3521
3522 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3523 if (status) {
3524 dev_err(&adapter->pdev->dev,
3525 "Flashing section type %d failed.\n",
3526 fsec->fsec_entry[i].type);
3527 return status;
3528 }
3529 }
3530 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003531}
3532
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003533static int lancer_wait_idle(struct be_adapter *adapter)
3534{
3535#define SLIPORT_IDLE_TIMEOUT 30
3536 u32 reg_val;
3537 int status = 0, i;
3538
3539 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3540 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3541 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3542 break;
3543
3544 ssleep(1);
3545 }
3546
3547 if (i == SLIPORT_IDLE_TIMEOUT)
3548 status = -1;
3549
3550 return status;
3551}
3552
3553static int lancer_fw_reset(struct be_adapter *adapter)
3554{
3555 int status = 0;
3556
3557 status = lancer_wait_idle(adapter);
3558 if (status)
3559 return status;
3560
3561 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3562 PHYSDEV_CONTROL_OFFSET);
3563
3564 return status;
3565}
3566
/* Download a firmware image to a Lancer adapter.
 * The image is streamed to the "/prg" flash object in 32KB chunks via
 * WRITE_OBJECT commands, then committed with a zero-length write. The FW
 * reports via 'change_status' whether a port reset (issued here) or a full
 * system reboot is needed for the new image to become active.
 * Returns 0 on success or a negative/command error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the request header plus one chunk payload */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by the amount FW actually accepted */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image: FW reset if sufficient, else tell the
	 * user a reboot is required.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3661
Sathya Perlaca34fe32012-11-06 17:48:56 +00003662#define UFI_TYPE2 2
3663#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003664#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003665#define UFI_TYPE4 4
3666static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003667 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003668{
3669 if (fhdr == NULL)
3670 goto be_get_ufi_exit;
3671
Sathya Perlaca34fe32012-11-06 17:48:56 +00003672 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3673 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003674 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3675 if (fhdr->asic_type_rev == 0x10)
3676 return UFI_TYPE3R;
3677 else
3678 return UFI_TYPE3;
3679 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003680 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003681
3682be_get_ufi_exit:
3683 dev_err(&adapter->pdev->dev,
3684 "UFI and Interface are not compatible for flashing\n");
3685 return -1;
3686}
3687
/* Flash a UFI firmware file on non-Lancer adapters.
 * Determines the UFI generation from the file header, then dispatches the
 * appropriate flashing routine. Gen2 (BE2) UFIs carry no per-image headers
 * and are flashed outside the image loop; gen3/gen4 UFIs are flashed once
 * an image header with imageid == 1 is found.
 * Returns 0 on success, -ENOMEM, -1 (incompatible UFI), or a flash status.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* May return -1 for an incompatible UFI; handled after the loop */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 UFIs have no image headers; flash directly. An unrecognized
	 * UFI type propagates as a failure here.
	 */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3756
3757int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3758{
3759 const struct firmware *fw;
3760 int status;
3761
3762 if (!netif_running(adapter->netdev)) {
3763 dev_err(&adapter->pdev->dev,
3764 "Firmware load not allowed (interface is down)\n");
3765 return -1;
3766 }
3767
3768 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3769 if (status)
3770 goto fw_exit;
3771
3772 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3773
3774 if (lancer_chip(adapter))
3775 status = lancer_fw_download(adapter, fw);
3776 else
3777 status = be_fw_download(adapter, fw);
3778
Ajit Khaparde84517482009-09-04 03:12:16 +00003779fw_exit:
3780 release_firmware(fw);
3781 return status;
3782}
3783
/* net_device callbacks exposed to the networking core */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3803
/* Initialize the net_device: advertise offload features, install the ops
 * and ethtool tables, and register a NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: SG, TSO, checksum, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN rx strip/filter are always on and not user-toggleable */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3835
/* Undo be_map_pci_bars(); safe with partially-initialized mappings since
 * NULL pointers are skipped.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
3843
/* BAR number holding the doorbell area: BAR 0 on Lancer chips and on
 * virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3851
3852static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003853{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003854 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003855 adapter->roce_db.size = 4096;
3856 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3857 db_bar(adapter));
3858 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3859 db_bar(adapter));
3860 }
Parav Pandit045508a2012-03-26 14:27:13 +00003861 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003862}
3863
3864static int be_map_pci_bars(struct be_adapter *adapter)
3865{
3866 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003867 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003868
Sathya Perlace66f782012-11-06 17:48:58 +00003869 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3870 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3871 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003872
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003873 if (BEx_chip(adapter) && be_physfn(adapter)) {
3874 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3875 if (adapter->csr == NULL)
3876 return -ENOMEM;
3877 }
3878
Sathya Perlace66f782012-11-06 17:48:58 +00003879 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003880 if (addr == NULL)
3881 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003882 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003883
3884 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003885 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003886
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003887pci_map_err:
3888 be_unmap_pci_bars(adapter);
3889 return -ENOMEM;
3890}
3891
/* Release control-path resources acquired by be_ctrl_init(): BAR
 * mappings, the mailbox DMA buffer, and the rx-filter DMA buffer.
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
3907
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003908static int be_ctrl_init(struct be_adapter *adapter)
3909{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003910 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3911 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003912 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003913 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003914 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003915
Sathya Perlace66f782012-11-06 17:48:58 +00003916 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3917 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3918 SLI_INTF_FAMILY_SHIFT;
3919 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3920
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003921 status = be_map_pci_bars(adapter);
3922 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003923 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003924
3925 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003926 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3927 mbox_mem_alloc->size,
3928 &mbox_mem_alloc->dma,
3929 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003930 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003931 status = -ENOMEM;
3932 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003933 }
3934 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3935 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3936 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3937 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003938
Sathya Perla5b8821b2011-08-02 19:57:44 +00003939 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3940 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
Joe Perches1f9061d22013-03-15 07:23:58 +00003941 &rx_filter->dma,
3942 GFP_KERNEL | __GFP_ZERO);
Sathya Perla5b8821b2011-08-02 19:57:44 +00003943 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003944 status = -ENOMEM;
3945 goto free_mbox;
3946 }
Joe Perches1f9061d22013-03-15 07:23:58 +00003947
Ivan Vecera29849612010-12-14 05:43:19 +00003948 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003949 spin_lock_init(&adapter->mcc_lock);
3950 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003951
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003952 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003953 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003954 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003955
3956free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003957 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3958 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003959
3960unmap_pci_bars:
3961 be_unmap_pci_bars(adapter);
3962
3963done:
3964 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003965}
3966
/* Free the DMA buffer used for hardware statistics commands, if any */
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}
3975
3976static int be_stats_init(struct be_adapter *adapter)
3977{
Sathya Perla3abcded2010-10-03 22:12:27 -07003978 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003979
Sathya Perlaca34fe32012-11-06 17:48:56 +00003980 if (lancer_chip(adapter))
3981 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3982 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003983 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003984 else
3985 /* BE3 and Skyhawk */
3986 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3987
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003988 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003989 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003990 if (cmd->va == NULL)
3991 return -1;
3992 return 0;
3993}
3994
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe — RoCE first, then interrupts/recovery work, netdev, rings,
 * control path, and finally the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop error-recovery work before unregistering the netdev */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4026
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004027bool be_is_wol_supported(struct be_adapter *adapter)
4028{
4029 return ((adapter->wol_cap & BE_WOL_CAP) &&
4030 !be_is_wol_excluded(adapter)) ? true : false;
4031}
4032
/* Query the firmware's extended FAT capabilities and extract the UART
 * trace (debug log) level.  Returns the configured level, or 0 on any
 * failure or on Lancer chips (which do not support this query).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	/* The response is DMAed by the fw; allocate coherent memory for it */
	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the generic cmd response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Scan module 0's trace modes; keep the dbg level of the
		 * last entry configured for UART mode */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004069
/* Gather one-time configuration after controller init: controller
 * attributes, WOL capability, the die-temperature polling interval and
 * the default netif message level derived from the fw log level.
 * Returns 0 on success or a negative status from a mandatory fw cmd.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Enable HW-level messages only when fw logging is at/below default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
4098
/* Recover a Lancer-chip function after a hw error: wait for the chip
 * to reach the ready state, tear down and rebuild the whole adapter,
 * and restore the interface to its pre-error running state.
 * Returns 0 on success; -EAGAIN means fw resource provisioning is
 * still in progress and the caller should retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	/* Release queues, interrupts and fw resources before re-setup */
	be_clear(adapter);

	/* Clear sticky error flags so subsequent fw cmds are not rejected */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}
4135
/* Periodic (1s) work item that polls for adapter hw errors and, on
 * Lancer chips, drives the recovery sequence.  Reschedules itself
 * unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach the netdev under rtnl so the stack stops using
		 * it while the function is being rebuilt */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4162
/* Periodic (1s) housekeeping: reap MCC completions while interrupts
 * are disabled, refresh hw statistics and die temperature, replenish
 * RX rings that ran dry, and adapt per-EQ interrupt delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats cmd only after the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Sample the die temperature once every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Repost buffers on RX rings that starved under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4205
Sathya Perla39f1d942012-05-08 19:41:24 +00004206static bool be_reset_required(struct be_adapter *adapter)
4207{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004208 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004209}
4210
Sathya Perlad3791422012-09-28 04:39:44 +00004211static char *mc_name(struct be_adapter *adapter)
4212{
4213 if (adapter->function_mode & FLEX10_MODE)
4214 return "FLEX10";
4215 else if (adapter->function_mode & VNIC_MODE)
4216 return "vNIC";
4217 else if (adapter->function_mode & UMC_ENABLED)
4218 return "UMC";
4219 else
4220 return "";
4221}
4222
/* Printable name for the PCI function type: physical or virtual. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4227
/* PCI probe: bring up one BE NIC function.  Enables the PCI device,
 * sets up DMA masks, initializes the controller and firmware, creates
 * queues via be_setup(), registers the netdev and kicks off the
 * periodic recovery worker.  On failure, unwinds in reverse order via
 * the goto-cleanup ladder at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER reporting failure is non-fatal; just log it */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	/* Notify the RoCE driver of the new NIC function */
	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4356
/* PM suspend: arm WOL if enabled, stop the recovery worker, close the
 * interface, tear down adapter resources and power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4380
/* PM resume: re-enable the device, re-init firmware cmds, rebuild the
 * adapter, reopen the interface if it was running, then restart the
 * recovery worker and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4417
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* be_probe() may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Triggers the FLR that quiesces all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4437
/* EEH callback: a PCI channel error was detected.  Tear the adapter
 * down (once) and tell the EEH core whether a slot reset might
 * recover the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* This callback can fire multiple times; clean up only once */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4476
/* EEH callback: the slot has been reset.  Re-enable the device,
 * restore PCI state and wait for firmware readiness before reporting
 * the device as recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear sticky error state so subsequent fw cmds are accepted */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4503
/* EEH callback: traffic may flow again.  Reset and re-init the
 * function, rebuild the adapter, reopen the interface if it was
 * running and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4540
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4546
/* PCI driver registration: probe/remove, legacy PM, shutdown and
 * error-recovery entry points for the devices listed in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4557
4558static int __init be_init_module(void)
4559{
Joe Perches8e95a202009-12-03 07:58:21 +00004560 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4561 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004562 printk(KERN_WARNING DRV_NAME
4563 " : Module param rx_frag_size must be 2048/4096/8192."
4564 " Using 2048\n");
4565 rx_frag_size = 2048;
4566 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004567
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004568 return pci_register_driver(&be_driver);
4569}
4570module_init(be_init_module);
4571
/* Module exit point: unregister the PCI driver, which invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);